110be98a7SChris Wilson /* 210be98a7SChris Wilson * SPDX-License-Identifier: MIT 310be98a7SChris Wilson * 410be98a7SChris Wilson * Copyright © 2011-2012 Intel Corporation 510be98a7SChris Wilson */ 610be98a7SChris Wilson 710be98a7SChris Wilson /* 810be98a7SChris Wilson * This file implements HW context support. On gen5+ a HW context consists of an 910be98a7SChris Wilson * opaque GPU object which is referenced at times of context saves and restores. 1010be98a7SChris Wilson * With RC6 enabled, the context is also referenced as the GPU enters and exists 1110be98a7SChris Wilson * from RC6 (GPU has it's own internal power context, except on gen5). Though 1210be98a7SChris Wilson * something like a context does exist for the media ring, the code only 1310be98a7SChris Wilson * supports contexts for the render ring. 1410be98a7SChris Wilson * 1510be98a7SChris Wilson * In software, there is a distinction between contexts created by the user, 1610be98a7SChris Wilson * and the default HW context. The default HW context is used by GPU clients 1710be98a7SChris Wilson * that do not request setup of their own hardware context. The default 1810be98a7SChris Wilson * context's state is never restored to help prevent programming errors. This 1910be98a7SChris Wilson * would happen if a client ran and piggy-backed off another clients GPU state. 2010be98a7SChris Wilson * The default context only exists to give the GPU some offset to load as the 2110be98a7SChris Wilson * current to invoke a save of the context we actually care about. In fact, the 2210be98a7SChris Wilson * code could likely be constructed, albeit in a more complicated fashion, to 2310be98a7SChris Wilson * never use the default context, though that limits the driver's ability to 2410be98a7SChris Wilson * swap out, and/or destroy other contexts. 2510be98a7SChris Wilson * 2610be98a7SChris Wilson * All other contexts are created as a request by the GPU client. 
These contexts 2710be98a7SChris Wilson * store GPU state, and thus allow GPU clients to not re-emit state (and 2810be98a7SChris Wilson * potentially query certain state) at any time. The kernel driver makes 2910be98a7SChris Wilson * certain that the appropriate commands are inserted. 3010be98a7SChris Wilson * 3110be98a7SChris Wilson * The context life cycle is semi-complicated in that context BOs may live 3210be98a7SChris Wilson * longer than the context itself because of the way the hardware, and object 3310be98a7SChris Wilson * tracking works. Below is a very crude representation of the state machine 3410be98a7SChris Wilson * describing the context life. 3510be98a7SChris Wilson * refcount pincount active 3610be98a7SChris Wilson * S0: initial state 0 0 0 3710be98a7SChris Wilson * S1: context created 1 0 0 3810be98a7SChris Wilson * S2: context is currently running 2 1 X 3910be98a7SChris Wilson * S3: GPU referenced, but not current 2 0 1 4010be98a7SChris Wilson * S4: context is current, but destroyed 1 1 0 4110be98a7SChris Wilson * S5: like S3, but destroyed 1 0 1 4210be98a7SChris Wilson * 4310be98a7SChris Wilson * The most common (but not all) transitions: 4410be98a7SChris Wilson * S0->S1: client creates a context 4510be98a7SChris Wilson * S1->S2: client submits execbuf with context 4610be98a7SChris Wilson * S2->S3: other clients submits execbuf with context 4710be98a7SChris Wilson * S3->S1: context object was retired 4810be98a7SChris Wilson * S3->S2: clients submits another execbuf 4910be98a7SChris Wilson * S2->S4: context destroy called with current context 5010be98a7SChris Wilson * S3->S5->S0: destroy path 5110be98a7SChris Wilson * S4->S5->S0: destroy path on current context 5210be98a7SChris Wilson * 5310be98a7SChris Wilson * There are two confusing terms used above: 5410be98a7SChris Wilson * The "current context" means the context which is currently running on the 5510be98a7SChris Wilson * GPU. 
The GPU has loaded its state already and has stored away the gtt 5610be98a7SChris Wilson * offset of the BO. The GPU is not actively referencing the data at this 5710be98a7SChris Wilson * offset, but it will on the next context switch. The only way to avoid this 5810be98a7SChris Wilson * is to do a GPU reset. 5910be98a7SChris Wilson * 6010be98a7SChris Wilson * An "active context' is one which was previously the "current context" and is 6110be98a7SChris Wilson * on the active list waiting for the next context switch to occur. Until this 6210be98a7SChris Wilson * happens, the object must remain at the same gtt offset. It is therefore 6310be98a7SChris Wilson * possible to destroy a context, but it is still active. 6410be98a7SChris Wilson * 6510be98a7SChris Wilson */ 6610be98a7SChris Wilson 6710be98a7SChris Wilson #include <linux/log2.h> 6810be98a7SChris Wilson #include <linux/nospec.h> 6910be98a7SChris Wilson 702c86e55dSMatthew Auld #include "gt/gen6_ppgtt.h" 719f3ccd40SChris Wilson #include "gt/intel_context.h" 7288be76cdSChris Wilson #include "gt/intel_context_param.h" 732e0986a5SChris Wilson #include "gt/intel_engine_heartbeat.h" 74750e76b4SChris Wilson #include "gt/intel_engine_user.h" 7570a2b431SChris Wilson #include "gt/intel_execlists_submission.h" /* virtual_engine */ 762871ea85SChris Wilson #include "gt/intel_ring.h" 7710be98a7SChris Wilson 7810be98a7SChris Wilson #include "i915_gem_context.h" 7910be98a7SChris Wilson #include "i915_globals.h" 8010be98a7SChris Wilson #include "i915_trace.h" 8110be98a7SChris Wilson #include "i915_user_extensions.h" 8210be98a7SChris Wilson 8310be98a7SChris Wilson #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 8410be98a7SChris Wilson 8510be98a7SChris Wilson static struct i915_global_gem_context { 8610be98a7SChris Wilson struct i915_global base; 8710be98a7SChris Wilson struct kmem_cache *slab_luts; 8810be98a7SChris Wilson } global; 8910be98a7SChris Wilson 9010be98a7SChris Wilson struct i915_lut_handle 
*i915_lut_handle_alloc(void) 9110be98a7SChris Wilson { 9210be98a7SChris Wilson return kmem_cache_alloc(global.slab_luts, GFP_KERNEL); 9310be98a7SChris Wilson } 9410be98a7SChris Wilson 9510be98a7SChris Wilson void i915_lut_handle_free(struct i915_lut_handle *lut) 9610be98a7SChris Wilson { 9710be98a7SChris Wilson return kmem_cache_free(global.slab_luts, lut); 9810be98a7SChris Wilson } 9910be98a7SChris Wilson 10010be98a7SChris Wilson static void lut_close(struct i915_gem_context *ctx) 10110be98a7SChris Wilson { 10210be98a7SChris Wilson struct radix_tree_iter iter; 10310be98a7SChris Wilson void __rcu **slot; 10410be98a7SChris Wilson 105f7ce8639SChris Wilson mutex_lock(&ctx->lut_mutex); 10610be98a7SChris Wilson rcu_read_lock(); 10710be98a7SChris Wilson radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { 10810be98a7SChris Wilson struct i915_vma *vma = rcu_dereference_raw(*slot); 109155ab883SChris Wilson struct drm_i915_gem_object *obj = vma->obj; 110155ab883SChris Wilson struct i915_lut_handle *lut; 11110be98a7SChris Wilson 112155ab883SChris Wilson if (!kref_get_unless_zero(&obj->base.refcount)) 113155ab883SChris Wilson continue; 114155ab883SChris Wilson 115096a42ddSChris Wilson spin_lock(&obj->lut_lock); 116155ab883SChris Wilson list_for_each_entry(lut, &obj->lut_list, obj_link) { 117155ab883SChris Wilson if (lut->ctx != ctx) 118155ab883SChris Wilson continue; 119155ab883SChris Wilson 120155ab883SChris Wilson if (lut->handle != iter.index) 121155ab883SChris Wilson continue; 122155ab883SChris Wilson 123155ab883SChris Wilson list_del(&lut->obj_link); 124155ab883SChris Wilson break; 125155ab883SChris Wilson } 126096a42ddSChris Wilson spin_unlock(&obj->lut_lock); 127155ab883SChris Wilson 128155ab883SChris Wilson if (&lut->obj_link != &obj->lut_list) { 129155ab883SChris Wilson i915_lut_handle_free(lut); 13010be98a7SChris Wilson radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); 131155ab883SChris Wilson i915_vma_close(vma); 132155ab883SChris Wilson 
i915_gem_object_put(obj); 133155ab883SChris Wilson } 13410be98a7SChris Wilson 135155ab883SChris Wilson i915_gem_object_put(obj); 13610be98a7SChris Wilson } 13710be98a7SChris Wilson rcu_read_unlock(); 138f7ce8639SChris Wilson mutex_unlock(&ctx->lut_mutex); 13910be98a7SChris Wilson } 14010be98a7SChris Wilson 14110be98a7SChris Wilson static struct intel_context * 14210be98a7SChris Wilson lookup_user_engine(struct i915_gem_context *ctx, 14310be98a7SChris Wilson unsigned long flags, 14410be98a7SChris Wilson const struct i915_engine_class_instance *ci) 14510be98a7SChris Wilson #define LOOKUP_USER_INDEX BIT(0) 14610be98a7SChris Wilson { 14710be98a7SChris Wilson int idx; 14810be98a7SChris Wilson 14910be98a7SChris Wilson if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx)) 15010be98a7SChris Wilson return ERR_PTR(-EINVAL); 15110be98a7SChris Wilson 15210be98a7SChris Wilson if (!i915_gem_context_user_engines(ctx)) { 15310be98a7SChris Wilson struct intel_engine_cs *engine; 15410be98a7SChris Wilson 15510be98a7SChris Wilson engine = intel_engine_lookup_user(ctx->i915, 15610be98a7SChris Wilson ci->engine_class, 15710be98a7SChris Wilson ci->engine_instance); 15810be98a7SChris Wilson if (!engine) 15910be98a7SChris Wilson return ERR_PTR(-EINVAL); 16010be98a7SChris Wilson 161f1c4d157SChris Wilson idx = engine->legacy_idx; 16210be98a7SChris Wilson } else { 16310be98a7SChris Wilson idx = ci->engine_instance; 16410be98a7SChris Wilson } 16510be98a7SChris Wilson 16610be98a7SChris Wilson return i915_gem_context_get_engine(ctx, idx); 16710be98a7SChris Wilson } 16810be98a7SChris Wilson 16927dbae8fSChris Wilson static struct i915_address_space * 17027dbae8fSChris Wilson context_get_vm_rcu(struct i915_gem_context *ctx) 17127dbae8fSChris Wilson { 17227dbae8fSChris Wilson GEM_BUG_ON(!rcu_access_pointer(ctx->vm)); 17327dbae8fSChris Wilson 17427dbae8fSChris Wilson do { 17527dbae8fSChris Wilson struct i915_address_space *vm; 17627dbae8fSChris Wilson 17727dbae8fSChris Wilson /* 
17827dbae8fSChris Wilson * We do not allow downgrading from full-ppgtt [to a shared 17927dbae8fSChris Wilson * global gtt], so ctx->vm cannot become NULL. 18027dbae8fSChris Wilson */ 18127dbae8fSChris Wilson vm = rcu_dereference(ctx->vm); 18227dbae8fSChris Wilson if (!kref_get_unless_zero(&vm->ref)) 18327dbae8fSChris Wilson continue; 18427dbae8fSChris Wilson 18527dbae8fSChris Wilson /* 18627dbae8fSChris Wilson * This ppgtt may have be reallocated between 18727dbae8fSChris Wilson * the read and the kref, and reassigned to a third 18827dbae8fSChris Wilson * context. In order to avoid inadvertent sharing 18927dbae8fSChris Wilson * of this ppgtt with that third context (and not 19027dbae8fSChris Wilson * src), we have to confirm that we have the same 19127dbae8fSChris Wilson * ppgtt after passing through the strong memory 19227dbae8fSChris Wilson * barrier implied by a successful 19327dbae8fSChris Wilson * kref_get_unless_zero(). 19427dbae8fSChris Wilson * 19527dbae8fSChris Wilson * Once we have acquired the current ppgtt of ctx, 19627dbae8fSChris Wilson * we no longer care if it is released from ctx, as 19727dbae8fSChris Wilson * it cannot be reallocated elsewhere. 
19827dbae8fSChris Wilson */ 19927dbae8fSChris Wilson 20027dbae8fSChris Wilson if (vm == rcu_access_pointer(ctx->vm)) 20127dbae8fSChris Wilson return rcu_pointer_handoff(vm); 20227dbae8fSChris Wilson 20327dbae8fSChris Wilson i915_vm_put(vm); 20427dbae8fSChris Wilson } while (1); 20527dbae8fSChris Wilson } 20627dbae8fSChris Wilson 207e6ba7648SChris Wilson static void intel_context_set_gem(struct intel_context *ce, 208e6ba7648SChris Wilson struct i915_gem_context *ctx) 209e6ba7648SChris Wilson { 2106a8679c0SChris Wilson GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); 2116a8679c0SChris Wilson RCU_INIT_POINTER(ce->gem_context, ctx); 212e6ba7648SChris Wilson 213e6ba7648SChris Wilson if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) 214e6ba7648SChris Wilson ce->ring = __intel_context_ring_size(SZ_16K); 215e6ba7648SChris Wilson 216e6ba7648SChris Wilson if (rcu_access_pointer(ctx->vm)) { 217e6ba7648SChris Wilson struct i915_address_space *vm; 218e6ba7648SChris Wilson 219e6ba7648SChris Wilson rcu_read_lock(); 220e6ba7648SChris Wilson vm = context_get_vm_rcu(ctx); /* hmm */ 221e6ba7648SChris Wilson rcu_read_unlock(); 222e6ba7648SChris Wilson 223e6ba7648SChris Wilson i915_vm_put(ce->vm); 224e6ba7648SChris Wilson ce->vm = vm; 225e6ba7648SChris Wilson } 226e6ba7648SChris Wilson 227e6ba7648SChris Wilson GEM_BUG_ON(ce->timeline); 228e6ba7648SChris Wilson if (ctx->timeline) 229e6ba7648SChris Wilson ce->timeline = intel_timeline_get(ctx->timeline); 230e6ba7648SChris Wilson 231e6ba7648SChris Wilson if (ctx->sched.priority >= I915_PRIORITY_NORMAL && 2320eb670aaSChris Wilson intel_engine_has_timeslices(ce->engine)) 233e6ba7648SChris Wilson __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); 234e6ba7648SChris Wilson } 235e6ba7648SChris Wilson 23610be98a7SChris Wilson static void __free_engines(struct i915_gem_engines *e, unsigned int count) 23710be98a7SChris Wilson { 23810be98a7SChris Wilson while (count--) { 23910be98a7SChris Wilson if (!e->engines[count]) 24010be98a7SChris Wilson continue; 
24110be98a7SChris Wilson 24210be98a7SChris Wilson intel_context_put(e->engines[count]); 24310be98a7SChris Wilson } 24410be98a7SChris Wilson kfree(e); 24510be98a7SChris Wilson } 24610be98a7SChris Wilson 24710be98a7SChris Wilson static void free_engines(struct i915_gem_engines *e) 24810be98a7SChris Wilson { 24910be98a7SChris Wilson __free_engines(e, e->num_engines); 25010be98a7SChris Wilson } 25110be98a7SChris Wilson 252155ab883SChris Wilson static void free_engines_rcu(struct rcu_head *rcu) 25310be98a7SChris Wilson { 254130a95e9SChris Wilson struct i915_gem_engines *engines = 255130a95e9SChris Wilson container_of(rcu, struct i915_gem_engines, rcu); 256130a95e9SChris Wilson 257130a95e9SChris Wilson i915_sw_fence_fini(&engines->fence); 258130a95e9SChris Wilson free_engines(engines); 25910be98a7SChris Wilson } 26010be98a7SChris Wilson 26170c96e39SChris Wilson static int __i915_sw_fence_call 26270c96e39SChris Wilson engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) 26370c96e39SChris Wilson { 26470c96e39SChris Wilson struct i915_gem_engines *engines = 26570c96e39SChris Wilson container_of(fence, typeof(*engines), fence); 26670c96e39SChris Wilson 26770c96e39SChris Wilson switch (state) { 26870c96e39SChris Wilson case FENCE_COMPLETE: 26970c96e39SChris Wilson if (!list_empty(&engines->link)) { 27070c96e39SChris Wilson struct i915_gem_context *ctx = engines->ctx; 27170c96e39SChris Wilson unsigned long flags; 27270c96e39SChris Wilson 27370c96e39SChris Wilson spin_lock_irqsave(&ctx->stale.lock, flags); 27470c96e39SChris Wilson list_del(&engines->link); 27570c96e39SChris Wilson spin_unlock_irqrestore(&ctx->stale.lock, flags); 27670c96e39SChris Wilson } 27770c96e39SChris Wilson i915_gem_context_put(engines->ctx); 27870c96e39SChris Wilson break; 27970c96e39SChris Wilson 28070c96e39SChris Wilson case FENCE_FREE: 28170c96e39SChris Wilson init_rcu_head(&engines->rcu); 28270c96e39SChris Wilson call_rcu(&engines->rcu, free_engines_rcu); 28370c96e39SChris 
Wilson break; 28470c96e39SChris Wilson } 28570c96e39SChris Wilson 28670c96e39SChris Wilson return NOTIFY_DONE; 28770c96e39SChris Wilson } 28870c96e39SChris Wilson 28970c96e39SChris Wilson static struct i915_gem_engines *alloc_engines(unsigned int count) 29070c96e39SChris Wilson { 29170c96e39SChris Wilson struct i915_gem_engines *e; 29270c96e39SChris Wilson 29370c96e39SChris Wilson e = kzalloc(struct_size(e, engines, count), GFP_KERNEL); 29470c96e39SChris Wilson if (!e) 29570c96e39SChris Wilson return NULL; 29670c96e39SChris Wilson 29770c96e39SChris Wilson i915_sw_fence_init(&e->fence, engines_notify); 29870c96e39SChris Wilson return e; 29970c96e39SChris Wilson } 30070c96e39SChris Wilson 30110be98a7SChris Wilson static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) 30210be98a7SChris Wilson { 303f1c4d157SChris Wilson const struct intel_gt *gt = &ctx->i915->gt; 30410be98a7SChris Wilson struct intel_engine_cs *engine; 30510be98a7SChris Wilson struct i915_gem_engines *e; 30610be98a7SChris Wilson enum intel_engine_id id; 30710be98a7SChris Wilson 30870c96e39SChris Wilson e = alloc_engines(I915_NUM_ENGINES); 30910be98a7SChris Wilson if (!e) 31010be98a7SChris Wilson return ERR_PTR(-ENOMEM); 31110be98a7SChris Wilson 312f1c4d157SChris Wilson for_each_engine(engine, gt, id) { 31310be98a7SChris Wilson struct intel_context *ce; 31410be98a7SChris Wilson 315a50134b1STvrtko Ursulin if (engine->legacy_idx == INVALID_ENGINE) 316a50134b1STvrtko Ursulin continue; 317a50134b1STvrtko Ursulin 318a50134b1STvrtko Ursulin GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES); 319a50134b1STvrtko Ursulin GEM_BUG_ON(e->engines[engine->legacy_idx]); 320a50134b1STvrtko Ursulin 321e6ba7648SChris Wilson ce = intel_context_create(engine); 32210be98a7SChris Wilson if (IS_ERR(ce)) { 323a50134b1STvrtko Ursulin __free_engines(e, e->num_engines + 1); 32410be98a7SChris Wilson return ERR_CAST(ce); 32510be98a7SChris Wilson } 32610be98a7SChris Wilson 327e6ba7648SChris Wilson 
intel_context_set_gem(ce, ctx); 328e6ba7648SChris Wilson 329a50134b1STvrtko Ursulin e->engines[engine->legacy_idx] = ce; 330a50134b1STvrtko Ursulin e->num_engines = max(e->num_engines, engine->legacy_idx); 33110be98a7SChris Wilson } 332a50134b1STvrtko Ursulin e->num_engines++; 33310be98a7SChris Wilson 33410be98a7SChris Wilson return e; 33510be98a7SChris Wilson } 33610be98a7SChris Wilson 337*f8246cf4SChris Wilson void i915_gem_context_release(struct kref *ref) 33810be98a7SChris Wilson { 339*f8246cf4SChris Wilson struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); 34010be98a7SChris Wilson 341*f8246cf4SChris Wilson trace_i915_context_free(ctx); 342*f8246cf4SChris Wilson GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 343a4e7ccdaSChris Wilson 34410be98a7SChris Wilson mutex_destroy(&ctx->engines_mutex); 345f7ce8639SChris Wilson mutex_destroy(&ctx->lut_mutex); 34610be98a7SChris Wilson 34710be98a7SChris Wilson if (ctx->timeline) 348f0c02c1bSTvrtko Ursulin intel_timeline_put(ctx->timeline); 34910be98a7SChris Wilson 35010be98a7SChris Wilson put_pid(ctx->pid); 35110be98a7SChris Wilson mutex_destroy(&ctx->mutex); 35210be98a7SChris Wilson 35310be98a7SChris Wilson kfree_rcu(ctx, rcu); 35410be98a7SChris Wilson } 35510be98a7SChris Wilson 3562e0986a5SChris Wilson static inline struct i915_gem_engines * 3572e0986a5SChris Wilson __context_engines_static(const struct i915_gem_context *ctx) 3582e0986a5SChris Wilson { 3592e0986a5SChris Wilson return rcu_dereference_protected(ctx->engines, true); 3602e0986a5SChris Wilson } 3612e0986a5SChris Wilson 3622e0986a5SChris Wilson static void __reset_context(struct i915_gem_context *ctx, 3632e0986a5SChris Wilson struct intel_engine_cs *engine) 3642e0986a5SChris Wilson { 3652e0986a5SChris Wilson intel_gt_handle_error(engine->gt, engine->mask, 0, 3662e0986a5SChris Wilson "context closure in %s", ctx->name); 3672e0986a5SChris Wilson } 3682e0986a5SChris Wilson 3692e0986a5SChris Wilson static bool __cancel_engine(struct intel_engine_cs 
*engine) 3702e0986a5SChris Wilson { 3712e0986a5SChris Wilson /* 3722e0986a5SChris Wilson * Send a "high priority pulse" down the engine to cause the 3732e0986a5SChris Wilson * current request to be momentarily preempted. (If it fails to 3742e0986a5SChris Wilson * be preempted, it will be reset). As we have marked our context 3752e0986a5SChris Wilson * as banned, any incomplete request, including any running, will 3762e0986a5SChris Wilson * be skipped following the preemption. 3772e0986a5SChris Wilson * 3782e0986a5SChris Wilson * If there is no hangchecking (one of the reasons why we try to 3792e0986a5SChris Wilson * cancel the context) and no forced preemption, there may be no 3802e0986a5SChris Wilson * means by which we reset the GPU and evict the persistent hog. 3812e0986a5SChris Wilson * Ergo if we are unable to inject a preemptive pulse that can 3822e0986a5SChris Wilson * kill the banned context, we fallback to doing a local reset 3832e0986a5SChris Wilson * instead. 3842e0986a5SChris Wilson */ 385651dabe2SChris Wilson return intel_engine_pulse(engine) == 0; 3862e0986a5SChris Wilson } 3872e0986a5SChris Wilson 388736e785fSChris Wilson static bool 389736e785fSChris Wilson __active_engine(struct i915_request *rq, struct intel_engine_cs **active) 3902e0986a5SChris Wilson { 3912e0986a5SChris Wilson struct intel_engine_cs *engine, *locked; 392736e785fSChris Wilson bool ret = false; 3932e0986a5SChris Wilson 3942e0986a5SChris Wilson /* 3952e0986a5SChris Wilson * Serialise with __i915_request_submit() so that it sees 3962e0986a5SChris Wilson * is-banned?, or we know the request is already inflight. 397736e785fSChris Wilson * 398736e785fSChris Wilson * Note that rq->engine is unstable, and so we double 399736e785fSChris Wilson * check that we have acquired the lock on the final engine. 
4002e0986a5SChris Wilson */ 4012e0986a5SChris Wilson locked = READ_ONCE(rq->engine); 4022e0986a5SChris Wilson spin_lock_irq(&locked->active.lock); 4032e0986a5SChris Wilson while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { 4042e0986a5SChris Wilson spin_unlock(&locked->active.lock); 4052e0986a5SChris Wilson locked = engine; 406736e785fSChris Wilson spin_lock(&locked->active.lock); 4072e0986a5SChris Wilson } 4082e0986a5SChris Wilson 4093cfea8c9SChris Wilson if (i915_request_is_active(rq)) { 4103cfea8c9SChris Wilson if (!i915_request_completed(rq)) 411736e785fSChris Wilson *active = locked; 412736e785fSChris Wilson ret = true; 413736e785fSChris Wilson } 4142e0986a5SChris Wilson 4152e0986a5SChris Wilson spin_unlock_irq(&locked->active.lock); 4162e0986a5SChris Wilson 417736e785fSChris Wilson return ret; 4182e0986a5SChris Wilson } 4192e0986a5SChris Wilson 4204a317415SChris Wilson static struct intel_engine_cs *active_engine(struct intel_context *ce) 4214a317415SChris Wilson { 4224a317415SChris Wilson struct intel_engine_cs *engine = NULL; 4234a317415SChris Wilson struct i915_request *rq; 4244a317415SChris Wilson 4254a317415SChris Wilson if (!ce->timeline) 4264a317415SChris Wilson return NULL; 4274a317415SChris Wilson 4283cfea8c9SChris Wilson /* 4293cfea8c9SChris Wilson * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference 4303cfea8c9SChris Wilson * to the request to prevent it being transferred to a new timeline 4313cfea8c9SChris Wilson * (and onto a new timeline->requests list). 4323cfea8c9SChris Wilson */ 433736e785fSChris Wilson rcu_read_lock(); 4343cfea8c9SChris Wilson list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { 4353cfea8c9SChris Wilson bool found; 4363cfea8c9SChris Wilson 4373cfea8c9SChris Wilson /* timeline is already completed upto this point? 
*/ 4383cfea8c9SChris Wilson if (!i915_request_get_rcu(rq)) 4393cfea8c9SChris Wilson break; 4404a317415SChris Wilson 4414a317415SChris Wilson /* Check with the backend if the request is inflight */ 4423cfea8c9SChris Wilson found = true; 4433cfea8c9SChris Wilson if (likely(rcu_access_pointer(rq->timeline) == ce->timeline)) 4443cfea8c9SChris Wilson found = __active_engine(rq, &engine); 4453cfea8c9SChris Wilson 4463cfea8c9SChris Wilson i915_request_put(rq); 4473cfea8c9SChris Wilson if (found) 4484a317415SChris Wilson break; 4494a317415SChris Wilson } 450736e785fSChris Wilson rcu_read_unlock(); 4514a317415SChris Wilson 4524a317415SChris Wilson return engine; 4534a317415SChris Wilson } 4544a317415SChris Wilson 455651dabe2SChris Wilson static void kill_engines(struct i915_gem_engines *engines, bool ban) 4562e0986a5SChris Wilson { 4572e0986a5SChris Wilson struct i915_gem_engines_iter it; 4582e0986a5SChris Wilson struct intel_context *ce; 4592e0986a5SChris Wilson 4602e0986a5SChris Wilson /* 4612e0986a5SChris Wilson * Map the user's engine back to the actual engines; one virtual 4622e0986a5SChris Wilson * engine will be mapped to multiple engines, and using ctx->engine[] 4632e0986a5SChris Wilson * the same engine may be have multiple instances in the user's map. 4642e0986a5SChris Wilson * However, we only care about pending requests, so only include 4652e0986a5SChris Wilson * engines on which there are incomplete requests. 4662e0986a5SChris Wilson */ 46742fb60deSChris Wilson for_each_gem_engine(ce, engines, it) { 4682e0986a5SChris Wilson struct intel_engine_cs *engine; 4692e0986a5SChris Wilson 470651dabe2SChris Wilson if (ban && intel_context_set_banned(ce)) 4719f3ccd40SChris Wilson continue; 4729f3ccd40SChris Wilson 4734a317415SChris Wilson /* 4744a317415SChris Wilson * Check the current active state of this context; if we 4754a317415SChris Wilson * are currently executing on the GPU we need to evict 4764a317415SChris Wilson * ourselves. 
On the other hand, if we haven't yet been 4774a317415SChris Wilson * submitted to the GPU or if everything is complete, 4784a317415SChris Wilson * we have nothing to do. 4794a317415SChris Wilson */ 4804a317415SChris Wilson engine = active_engine(ce); 4812e0986a5SChris Wilson 4822e0986a5SChris Wilson /* First attempt to gracefully cancel the context */ 483651dabe2SChris Wilson if (engine && !__cancel_engine(engine) && ban) 4842e0986a5SChris Wilson /* 4852e0986a5SChris Wilson * If we are unable to send a preemptive pulse to bump 4862e0986a5SChris Wilson * the context from the GPU, we have to resort to a full 4872e0986a5SChris Wilson * reset. We hope the collateral damage is worth it. 4882e0986a5SChris Wilson */ 48942fb60deSChris Wilson __reset_context(engines->ctx, engine); 4902e0986a5SChris Wilson } 4912e0986a5SChris Wilson } 4922e0986a5SChris Wilson 493651dabe2SChris Wilson static void kill_context(struct i915_gem_context *ctx) 49442fb60deSChris Wilson { 495651dabe2SChris Wilson bool ban = (!i915_gem_context_is_persistent(ctx) || 496651dabe2SChris Wilson !ctx->i915->params.enable_hangcheck); 49742fb60deSChris Wilson struct i915_gem_engines *pos, *next; 49842fb60deSChris Wilson 499130a95e9SChris Wilson spin_lock_irq(&ctx->stale.lock); 500130a95e9SChris Wilson GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 50142fb60deSChris Wilson list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) { 502130a95e9SChris Wilson if (!i915_sw_fence_await(&pos->fence)) { 503130a95e9SChris Wilson list_del_init(&pos->link); 50442fb60deSChris Wilson continue; 505130a95e9SChris Wilson } 50642fb60deSChris Wilson 507130a95e9SChris Wilson spin_unlock_irq(&ctx->stale.lock); 50842fb60deSChris Wilson 509651dabe2SChris Wilson kill_engines(pos, ban); 51042fb60deSChris Wilson 511130a95e9SChris Wilson spin_lock_irq(&ctx->stale.lock); 512130a95e9SChris Wilson GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); 51342fb60deSChris Wilson list_safe_reset_next(pos, next, link); 51442fb60deSChris Wilson 
list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */ 51542fb60deSChris Wilson 51642fb60deSChris Wilson i915_sw_fence_complete(&pos->fence); 51742fb60deSChris Wilson } 518130a95e9SChris Wilson spin_unlock_irq(&ctx->stale.lock); 51942fb60deSChris Wilson } 52042fb60deSChris Wilson 521130a95e9SChris Wilson static void engines_idle_release(struct i915_gem_context *ctx, 522130a95e9SChris Wilson struct i915_gem_engines *engines) 523130a95e9SChris Wilson { 524130a95e9SChris Wilson struct i915_gem_engines_iter it; 525130a95e9SChris Wilson struct intel_context *ce; 526130a95e9SChris Wilson 527130a95e9SChris Wilson INIT_LIST_HEAD(&engines->link); 528130a95e9SChris Wilson 529130a95e9SChris Wilson engines->ctx = i915_gem_context_get(ctx); 530130a95e9SChris Wilson 531130a95e9SChris Wilson for_each_gem_engine(ce, engines, it) { 532e6829625SChris Wilson int err; 533130a95e9SChris Wilson 534130a95e9SChris Wilson /* serialises with execbuf */ 535207e4a71SChris Wilson set_bit(CONTEXT_CLOSED_BIT, &ce->flags); 536130a95e9SChris Wilson if (!intel_context_pin_if_active(ce)) 537130a95e9SChris Wilson continue; 538130a95e9SChris Wilson 539e6829625SChris Wilson /* Wait until context is finally scheduled out and retired */ 540e6829625SChris Wilson err = i915_sw_fence_await_active(&engines->fence, 541e6829625SChris Wilson &ce->active, 542e6829625SChris Wilson I915_ACTIVE_AWAIT_BARRIER); 543130a95e9SChris Wilson intel_context_unpin(ce); 544e6829625SChris Wilson if (err) 545130a95e9SChris Wilson goto kill; 546130a95e9SChris Wilson } 547130a95e9SChris Wilson 548130a95e9SChris Wilson spin_lock_irq(&ctx->stale.lock); 549130a95e9SChris Wilson if (!i915_gem_context_is_closed(ctx)) 550130a95e9SChris Wilson list_add_tail(&engines->link, &ctx->stale.engines); 551130a95e9SChris Wilson spin_unlock_irq(&ctx->stale.lock); 552130a95e9SChris Wilson 553130a95e9SChris Wilson kill: 554130a95e9SChris Wilson if (list_empty(&engines->link)) /* raced, already closed */ 555651dabe2SChris Wilson 
kill_engines(engines, true); 556130a95e9SChris Wilson 557130a95e9SChris Wilson i915_sw_fence_commit(&engines->fence); 55842fb60deSChris Wilson } 55942fb60deSChris Wilson 560267c0126SChris Wilson static void set_closed_name(struct i915_gem_context *ctx) 561267c0126SChris Wilson { 562267c0126SChris Wilson char *s; 563267c0126SChris Wilson 564267c0126SChris Wilson /* Replace '[]' with '<>' to indicate closed in debug prints */ 565267c0126SChris Wilson 566267c0126SChris Wilson s = strrchr(ctx->name, '['); 567267c0126SChris Wilson if (!s) 568267c0126SChris Wilson return; 569267c0126SChris Wilson 570267c0126SChris Wilson *s = '<'; 571267c0126SChris Wilson 572267c0126SChris Wilson s = strchr(s + 1, ']'); 573267c0126SChris Wilson if (s) 574267c0126SChris Wilson *s = '>'; 575267c0126SChris Wilson } 576267c0126SChris Wilson 57710be98a7SChris Wilson static void context_close(struct i915_gem_context *ctx) 57810be98a7SChris Wilson { 579a4e7ccdaSChris Wilson struct i915_address_space *vm; 580a4e7ccdaSChris Wilson 581130a95e9SChris Wilson /* Flush any concurrent set_engines() */ 582130a95e9SChris Wilson mutex_lock(&ctx->engines_mutex); 583130a95e9SChris Wilson engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); 5842850748eSChris Wilson i915_gem_context_set_closed(ctx); 585130a95e9SChris Wilson mutex_unlock(&ctx->engines_mutex); 5862850748eSChris Wilson 587155ab883SChris Wilson mutex_lock(&ctx->mutex); 588155ab883SChris Wilson 589130a95e9SChris Wilson set_closed_name(ctx); 590130a95e9SChris Wilson 591a4e7ccdaSChris Wilson vm = i915_gem_context_vm(ctx); 592a4e7ccdaSChris Wilson if (vm) 593a4e7ccdaSChris Wilson i915_vm_close(vm); 594a4e7ccdaSChris Wilson 595155ab883SChris Wilson ctx->file_priv = ERR_PTR(-EBADF); 59610be98a7SChris Wilson 59710be98a7SChris Wilson /* 59810be98a7SChris Wilson * The LUT uses the VMA as a backpointer to unref the object, 59910be98a7SChris Wilson * so we need to clear the LUT before we close all the VMA (inside 60010be98a7SChris Wilson * 
the ppgtt). 60110be98a7SChris Wilson */ 60210be98a7SChris Wilson lut_close(ctx); 60310be98a7SChris Wilson 604*f8246cf4SChris Wilson spin_lock(&ctx->i915->gem.contexts.lock); 605*f8246cf4SChris Wilson list_del(&ctx->link); 606*f8246cf4SChris Wilson spin_unlock(&ctx->i915->gem.contexts.lock); 607*f8246cf4SChris Wilson 608155ab883SChris Wilson mutex_unlock(&ctx->mutex); 6092e0986a5SChris Wilson 6102e0986a5SChris Wilson /* 6112e0986a5SChris Wilson * If the user has disabled hangchecking, we can not be sure that 6122e0986a5SChris Wilson * the batches will ever complete after the context is closed, 6132e0986a5SChris Wilson * keeping the context and all resources pinned forever. So in this 6142e0986a5SChris Wilson * case we opt to forcibly kill off all remaining requests on 6152e0986a5SChris Wilson * context close. 6162e0986a5SChris Wilson */ 6172e0986a5SChris Wilson kill_context(ctx); 6182e0986a5SChris Wilson 61910be98a7SChris Wilson i915_gem_context_put(ctx); 62010be98a7SChris Wilson } 62110be98a7SChris Wilson 622a0e04715SChris Wilson static int __context_set_persistence(struct i915_gem_context *ctx, bool state) 623a0e04715SChris Wilson { 624a0e04715SChris Wilson if (i915_gem_context_is_persistent(ctx) == state) 625a0e04715SChris Wilson return 0; 626a0e04715SChris Wilson 627a0e04715SChris Wilson if (state) { 628a0e04715SChris Wilson /* 629a0e04715SChris Wilson * Only contexts that are short-lived [that will expire or be 630a0e04715SChris Wilson * reset] are allowed to survive past termination. We require 631a0e04715SChris Wilson * hangcheck to ensure that the persistent requests are healthy. 
632a0e04715SChris Wilson */ 6338a25c4beSJani Nikula if (!ctx->i915->params.enable_hangcheck) 634a0e04715SChris Wilson return -EINVAL; 635a0e04715SChris Wilson 636a0e04715SChris Wilson i915_gem_context_set_persistence(ctx); 637a0e04715SChris Wilson } else { 638a0e04715SChris Wilson /* To cancel a context we use "preempt-to-idle" */ 639a0e04715SChris Wilson if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) 640a0e04715SChris Wilson return -ENODEV; 641a0e04715SChris Wilson 642d1b9b5f1SChris Wilson /* 643d1b9b5f1SChris Wilson * If the cancel fails, we then need to reset, cleanly! 644d1b9b5f1SChris Wilson * 645d1b9b5f1SChris Wilson * If the per-engine reset fails, all hope is lost! We resort 646d1b9b5f1SChris Wilson * to a full GPU reset in that unlikely case, but realistically 647d1b9b5f1SChris Wilson * if the engine could not reset, the full reset does not fare 648d1b9b5f1SChris Wilson * much better. The damage has been done. 649d1b9b5f1SChris Wilson * 650d1b9b5f1SChris Wilson * However, if we cannot reset an engine by itself, we cannot 651d1b9b5f1SChris Wilson * cleanup a hanging persistent context without causing 652d1b9b5f1SChris Wilson * colateral damage, and we should not pretend we can by 653d1b9b5f1SChris Wilson * exposing the interface. 
654d1b9b5f1SChris Wilson */ 655d1b9b5f1SChris Wilson if (!intel_has_reset_engine(&ctx->i915->gt)) 656d1b9b5f1SChris Wilson return -ENODEV; 657d1b9b5f1SChris Wilson 658a0e04715SChris Wilson i915_gem_context_clear_persistence(ctx); 659a0e04715SChris Wilson } 660a0e04715SChris Wilson 661a0e04715SChris Wilson return 0; 662a0e04715SChris Wilson } 663a0e04715SChris Wilson 66410be98a7SChris Wilson static struct i915_gem_context * 665e568ac38SChris Wilson __create_context(struct drm_i915_private *i915) 66610be98a7SChris Wilson { 66710be98a7SChris Wilson struct i915_gem_context *ctx; 66810be98a7SChris Wilson struct i915_gem_engines *e; 66910be98a7SChris Wilson int err; 67010be98a7SChris Wilson int i; 67110be98a7SChris Wilson 67210be98a7SChris Wilson ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 67310be98a7SChris Wilson if (!ctx) 67410be98a7SChris Wilson return ERR_PTR(-ENOMEM); 67510be98a7SChris Wilson 67610be98a7SChris Wilson kref_init(&ctx->ref); 677e568ac38SChris Wilson ctx->i915 = i915; 67810be98a7SChris Wilson ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); 67910be98a7SChris Wilson mutex_init(&ctx->mutex); 680eb4dedaeSChris Wilson INIT_LIST_HEAD(&ctx->link); 68110be98a7SChris Wilson 68242fb60deSChris Wilson spin_lock_init(&ctx->stale.lock); 68342fb60deSChris Wilson INIT_LIST_HEAD(&ctx->stale.engines); 68442fb60deSChris Wilson 68510be98a7SChris Wilson mutex_init(&ctx->engines_mutex); 68610be98a7SChris Wilson e = default_engines(ctx); 68710be98a7SChris Wilson if (IS_ERR(e)) { 68810be98a7SChris Wilson err = PTR_ERR(e); 68910be98a7SChris Wilson goto err_free; 69010be98a7SChris Wilson } 69110be98a7SChris Wilson RCU_INIT_POINTER(ctx->engines, e); 69210be98a7SChris Wilson 69310be98a7SChris Wilson INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); 694f7ce8639SChris Wilson mutex_init(&ctx->lut_mutex); 69510be98a7SChris Wilson 69610be98a7SChris Wilson /* NB: Mark all slices as needing a remap so that when the context first 69710be98a7SChris Wilson * loads it will restore 
whatever remap state already exists. If there 69810be98a7SChris Wilson * is no remap info, it will be a NOP. */ 699e568ac38SChris Wilson ctx->remap_slice = ALL_L3_SLICES(i915); 70010be98a7SChris Wilson 70110be98a7SChris Wilson i915_gem_context_set_bannable(ctx); 70210be98a7SChris Wilson i915_gem_context_set_recoverable(ctx); 703a0e04715SChris Wilson __context_set_persistence(ctx, true /* cgroup hook? */); 70410be98a7SChris Wilson 70510be98a7SChris Wilson for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) 70610be98a7SChris Wilson ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; 70710be98a7SChris Wilson 70810be98a7SChris Wilson return ctx; 70910be98a7SChris Wilson 71010be98a7SChris Wilson err_free: 71110be98a7SChris Wilson kfree(ctx); 71210be98a7SChris Wilson return ERR_PTR(err); 71310be98a7SChris Wilson } 71410be98a7SChris Wilson 71589ff76bfSChris Wilson static inline struct i915_gem_engines * 71689ff76bfSChris Wilson __context_engines_await(const struct i915_gem_context *ctx) 71789ff76bfSChris Wilson { 71889ff76bfSChris Wilson struct i915_gem_engines *engines; 71989ff76bfSChris Wilson 72089ff76bfSChris Wilson rcu_read_lock(); 72189ff76bfSChris Wilson do { 72289ff76bfSChris Wilson engines = rcu_dereference(ctx->engines); 72389ff76bfSChris Wilson GEM_BUG_ON(!engines); 72489ff76bfSChris Wilson 72589ff76bfSChris Wilson if (unlikely(!i915_sw_fence_await(&engines->fence))) 72689ff76bfSChris Wilson continue; 72789ff76bfSChris Wilson 72889ff76bfSChris Wilson if (likely(engines == rcu_access_pointer(ctx->engines))) 72989ff76bfSChris Wilson break; 73089ff76bfSChris Wilson 73189ff76bfSChris Wilson i915_sw_fence_complete(&engines->fence); 73289ff76bfSChris Wilson } while (1); 73389ff76bfSChris Wilson rcu_read_unlock(); 73489ff76bfSChris Wilson 73589ff76bfSChris Wilson return engines; 73689ff76bfSChris Wilson } 73789ff76bfSChris Wilson 73888be76cdSChris Wilson static int 73948ae397bSChris Wilson context_apply_all(struct i915_gem_context *ctx, 74088be76cdSChris 
Wilson int (*fn)(struct intel_context *ce, void *data), 74148ae397bSChris Wilson void *data) 74248ae397bSChris Wilson { 74348ae397bSChris Wilson struct i915_gem_engines_iter it; 74489ff76bfSChris Wilson struct i915_gem_engines *e; 74548ae397bSChris Wilson struct intel_context *ce; 74688be76cdSChris Wilson int err = 0; 74748ae397bSChris Wilson 74889ff76bfSChris Wilson e = __context_engines_await(ctx); 74989ff76bfSChris Wilson for_each_gem_engine(ce, e, it) { 75088be76cdSChris Wilson err = fn(ce, data); 75188be76cdSChris Wilson if (err) 75288be76cdSChris Wilson break; 75388be76cdSChris Wilson } 75489ff76bfSChris Wilson i915_sw_fence_complete(&e->fence); 75588be76cdSChris Wilson 75688be76cdSChris Wilson return err; 75748ae397bSChris Wilson } 75848ae397bSChris Wilson 75988be76cdSChris Wilson static int __apply_ppgtt(struct intel_context *ce, void *vm) 76048ae397bSChris Wilson { 76148ae397bSChris Wilson i915_vm_put(ce->vm); 76248ae397bSChris Wilson ce->vm = i915_vm_get(vm); 76388be76cdSChris Wilson return 0; 76448ae397bSChris Wilson } 76548ae397bSChris Wilson 766e568ac38SChris Wilson static struct i915_address_space * 767e568ac38SChris Wilson __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) 76810be98a7SChris Wilson { 76989ff76bfSChris Wilson struct i915_address_space *old; 77010be98a7SChris Wilson 77189ff76bfSChris Wilson old = rcu_replace_pointer(ctx->vm, 77289ff76bfSChris Wilson i915_vm_open(vm), 77389ff76bfSChris Wilson lockdep_is_held(&ctx->mutex)); 774a1c9ca22SChris Wilson GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old)); 775a1c9ca22SChris Wilson 77648ae397bSChris Wilson context_apply_all(ctx, __apply_ppgtt, vm); 777f5d974f9SChris Wilson 77810be98a7SChris Wilson return old; 77910be98a7SChris Wilson } 78010be98a7SChris Wilson 78110be98a7SChris Wilson static void __assign_ppgtt(struct i915_gem_context *ctx, 782e568ac38SChris Wilson struct i915_address_space *vm) 78310be98a7SChris Wilson { 784a4e7ccdaSChris Wilson if (vm == 
rcu_access_pointer(ctx->vm)) 78510be98a7SChris Wilson return; 78610be98a7SChris Wilson 787e568ac38SChris Wilson vm = __set_ppgtt(ctx, vm); 788e568ac38SChris Wilson if (vm) 7892850748eSChris Wilson i915_vm_close(vm); 79010be98a7SChris Wilson } 79110be98a7SChris Wilson 79275d0a7f3SChris Wilson static void __set_timeline(struct intel_timeline **dst, 79375d0a7f3SChris Wilson struct intel_timeline *src) 79475d0a7f3SChris Wilson { 79575d0a7f3SChris Wilson struct intel_timeline *old = *dst; 79675d0a7f3SChris Wilson 79775d0a7f3SChris Wilson *dst = src ? intel_timeline_get(src) : NULL; 79875d0a7f3SChris Wilson 79975d0a7f3SChris Wilson if (old) 80075d0a7f3SChris Wilson intel_timeline_put(old); 80175d0a7f3SChris Wilson } 80275d0a7f3SChris Wilson 80388be76cdSChris Wilson static int __apply_timeline(struct intel_context *ce, void *timeline) 80475d0a7f3SChris Wilson { 80575d0a7f3SChris Wilson __set_timeline(&ce->timeline, timeline); 80688be76cdSChris Wilson return 0; 80775d0a7f3SChris Wilson } 80875d0a7f3SChris Wilson 80975d0a7f3SChris Wilson static void __assign_timeline(struct i915_gem_context *ctx, 81075d0a7f3SChris Wilson struct intel_timeline *timeline) 81175d0a7f3SChris Wilson { 81275d0a7f3SChris Wilson __set_timeline(&ctx->timeline, timeline); 81375d0a7f3SChris Wilson context_apply_all(ctx, __apply_timeline, timeline); 81475d0a7f3SChris Wilson } 81575d0a7f3SChris Wilson 81610be98a7SChris Wilson static struct i915_gem_context * 817a4e7ccdaSChris Wilson i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) 81810be98a7SChris Wilson { 81910be98a7SChris Wilson struct i915_gem_context *ctx; 82010be98a7SChris Wilson 82110be98a7SChris Wilson if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && 822a4e7ccdaSChris Wilson !HAS_EXECLISTS(i915)) 82310be98a7SChris Wilson return ERR_PTR(-EINVAL); 82410be98a7SChris Wilson 825a4e7ccdaSChris Wilson ctx = __create_context(i915); 82610be98a7SChris Wilson if (IS_ERR(ctx)) 82710be98a7SChris Wilson return ctx; 
82810be98a7SChris Wilson 829a4e7ccdaSChris Wilson if (HAS_FULL_PPGTT(i915)) { 830ab53497bSChris Wilson struct i915_ppgtt *ppgtt; 83110be98a7SChris Wilson 8322c86e55dSMatthew Auld ppgtt = i915_ppgtt_create(&i915->gt); 83310be98a7SChris Wilson if (IS_ERR(ppgtt)) { 834baa89ba3SWambui Karuga drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", 83510be98a7SChris Wilson PTR_ERR(ppgtt)); 83610be98a7SChris Wilson context_close(ctx); 83710be98a7SChris Wilson return ERR_CAST(ppgtt); 83810be98a7SChris Wilson } 83910be98a7SChris Wilson 840a4e7ccdaSChris Wilson mutex_lock(&ctx->mutex); 841e568ac38SChris Wilson __assign_ppgtt(ctx, &ppgtt->vm); 842a4e7ccdaSChris Wilson mutex_unlock(&ctx->mutex); 843a4e7ccdaSChris Wilson 844e568ac38SChris Wilson i915_vm_put(&ppgtt->vm); 84510be98a7SChris Wilson } 84610be98a7SChris Wilson 84710be98a7SChris Wilson if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { 848f0c02c1bSTvrtko Ursulin struct intel_timeline *timeline; 84910be98a7SChris Wilson 850d1bf5dd8SChris Wilson timeline = intel_timeline_create(&i915->gt); 85110be98a7SChris Wilson if (IS_ERR(timeline)) { 85210be98a7SChris Wilson context_close(ctx); 85310be98a7SChris Wilson return ERR_CAST(timeline); 85410be98a7SChris Wilson } 85510be98a7SChris Wilson 85675d0a7f3SChris Wilson __assign_timeline(ctx, timeline); 85775d0a7f3SChris Wilson intel_timeline_put(timeline); 85810be98a7SChris Wilson } 85910be98a7SChris Wilson 86010be98a7SChris Wilson trace_i915_context_create(ctx); 86110be98a7SChris Wilson 86210be98a7SChris Wilson return ctx; 86310be98a7SChris Wilson } 86410be98a7SChris Wilson 865a4e7ccdaSChris Wilson static void init_contexts(struct i915_gem_contexts *gc) 86610be98a7SChris Wilson { 867a4e7ccdaSChris Wilson spin_lock_init(&gc->lock); 868a4e7ccdaSChris Wilson INIT_LIST_HEAD(&gc->list); 86910be98a7SChris Wilson } 87010be98a7SChris Wilson 871e6ba7648SChris Wilson void i915_gem_init__contexts(struct drm_i915_private *i915) 87210be98a7SChris Wilson { 873a4e7ccdaSChris Wilson 
init_contexts(&i915->gem.contexts); 87410be98a7SChris Wilson } 87510be98a7SChris Wilson 87610be98a7SChris Wilson static int gem_context_register(struct i915_gem_context *ctx, 877c100777cSTvrtko Ursulin struct drm_i915_file_private *fpriv, 878c100777cSTvrtko Ursulin u32 *id) 87910be98a7SChris Wilson { 880eb4dedaeSChris Wilson struct drm_i915_private *i915 = ctx->i915; 881a4e7ccdaSChris Wilson struct i915_address_space *vm; 88210be98a7SChris Wilson int ret; 88310be98a7SChris Wilson 88410be98a7SChris Wilson ctx->file_priv = fpriv; 885a4e7ccdaSChris Wilson 886a4e7ccdaSChris Wilson mutex_lock(&ctx->mutex); 887a4e7ccdaSChris Wilson vm = i915_gem_context_vm(ctx); 888a4e7ccdaSChris Wilson if (vm) 889a4e7ccdaSChris Wilson WRITE_ONCE(vm->file, fpriv); /* XXX */ 890a4e7ccdaSChris Wilson mutex_unlock(&ctx->mutex); 89110be98a7SChris Wilson 89210be98a7SChris Wilson ctx->pid = get_task_pid(current, PIDTYPE_PID); 893fc4f125dSChris Wilson snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 89410be98a7SChris Wilson current->comm, pid_nr(ctx->pid)); 89510be98a7SChris Wilson 89610be98a7SChris Wilson /* And finally expose ourselves to userspace via the idr */ 897c100777cSTvrtko Ursulin ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL); 898c100777cSTvrtko Ursulin if (ret) 899eb4dedaeSChris Wilson goto err_pid; 900c100777cSTvrtko Ursulin 901eb4dedaeSChris Wilson spin_lock(&i915->gem.contexts.lock); 902eb4dedaeSChris Wilson list_add_tail(&ctx->link, &i915->gem.contexts.list); 903eb4dedaeSChris Wilson spin_unlock(&i915->gem.contexts.lock); 904eb4dedaeSChris Wilson 905eb4dedaeSChris Wilson return 0; 906eb4dedaeSChris Wilson 907eb4dedaeSChris Wilson err_pid: 908eb4dedaeSChris Wilson put_pid(fetch_and_zero(&ctx->pid)); 90910be98a7SChris Wilson return ret; 91010be98a7SChris Wilson } 91110be98a7SChris Wilson 91210be98a7SChris Wilson int i915_gem_context_open(struct drm_i915_private *i915, 91310be98a7SChris Wilson struct drm_file *file) 91410be98a7SChris Wilson { 
91510be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 91610be98a7SChris Wilson struct i915_gem_context *ctx; 91710be98a7SChris Wilson int err; 918c100777cSTvrtko Ursulin u32 id; 91910be98a7SChris Wilson 920c100777cSTvrtko Ursulin xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); 921c100777cSTvrtko Ursulin 9225dbd2b7bSChris Wilson /* 0 reserved for invalid/unassigned ppgtt */ 9235dbd2b7bSChris Wilson xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); 92410be98a7SChris Wilson 92510be98a7SChris Wilson ctx = i915_gem_create_context(i915, 0); 92610be98a7SChris Wilson if (IS_ERR(ctx)) { 92710be98a7SChris Wilson err = PTR_ERR(ctx); 92810be98a7SChris Wilson goto err; 92910be98a7SChris Wilson } 93010be98a7SChris Wilson 931c100777cSTvrtko Ursulin err = gem_context_register(ctx, file_priv, &id); 93210be98a7SChris Wilson if (err < 0) 93310be98a7SChris Wilson goto err_ctx; 93410be98a7SChris Wilson 935c100777cSTvrtko Ursulin GEM_BUG_ON(id); 93610be98a7SChris Wilson return 0; 93710be98a7SChris Wilson 93810be98a7SChris Wilson err_ctx: 93910be98a7SChris Wilson context_close(ctx); 94010be98a7SChris Wilson err: 9415dbd2b7bSChris Wilson xa_destroy(&file_priv->vm_xa); 942c100777cSTvrtko Ursulin xa_destroy(&file_priv->context_xa); 94310be98a7SChris Wilson return err; 94410be98a7SChris Wilson } 94510be98a7SChris Wilson 94610be98a7SChris Wilson void i915_gem_context_close(struct drm_file *file) 94710be98a7SChris Wilson { 94810be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 9495dbd2b7bSChris Wilson struct i915_address_space *vm; 950c100777cSTvrtko Ursulin struct i915_gem_context *ctx; 951c100777cSTvrtko Ursulin unsigned long idx; 95210be98a7SChris Wilson 953c100777cSTvrtko Ursulin xa_for_each(&file_priv->context_xa, idx, ctx) 954c100777cSTvrtko Ursulin context_close(ctx); 955c100777cSTvrtko Ursulin xa_destroy(&file_priv->context_xa); 95610be98a7SChris Wilson 9575dbd2b7bSChris Wilson xa_for_each(&file_priv->vm_xa, idx, 
vm) 9585dbd2b7bSChris Wilson i915_vm_put(vm); 9595dbd2b7bSChris Wilson xa_destroy(&file_priv->vm_xa); 96010be98a7SChris Wilson } 96110be98a7SChris Wilson 96210be98a7SChris Wilson int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, 96310be98a7SChris Wilson struct drm_file *file) 96410be98a7SChris Wilson { 96510be98a7SChris Wilson struct drm_i915_private *i915 = to_i915(dev); 96610be98a7SChris Wilson struct drm_i915_gem_vm_control *args = data; 96710be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 968ab53497bSChris Wilson struct i915_ppgtt *ppgtt; 9695dbd2b7bSChris Wilson u32 id; 97010be98a7SChris Wilson int err; 97110be98a7SChris Wilson 97210be98a7SChris Wilson if (!HAS_FULL_PPGTT(i915)) 97310be98a7SChris Wilson return -ENODEV; 97410be98a7SChris Wilson 97510be98a7SChris Wilson if (args->flags) 97610be98a7SChris Wilson return -EINVAL; 97710be98a7SChris Wilson 9782c86e55dSMatthew Auld ppgtt = i915_ppgtt_create(&i915->gt); 97910be98a7SChris Wilson if (IS_ERR(ppgtt)) 98010be98a7SChris Wilson return PTR_ERR(ppgtt); 98110be98a7SChris Wilson 98210be98a7SChris Wilson ppgtt->vm.file = file_priv; 98310be98a7SChris Wilson 98410be98a7SChris Wilson if (args->extensions) { 98510be98a7SChris Wilson err = i915_user_extensions(u64_to_user_ptr(args->extensions), 98610be98a7SChris Wilson NULL, 0, 98710be98a7SChris Wilson ppgtt); 98810be98a7SChris Wilson if (err) 98910be98a7SChris Wilson goto err_put; 99010be98a7SChris Wilson } 99110be98a7SChris Wilson 9925dbd2b7bSChris Wilson err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, 9935dbd2b7bSChris Wilson xa_limit_32b, GFP_KERNEL); 99410be98a7SChris Wilson if (err) 99510be98a7SChris Wilson goto err_put; 99610be98a7SChris Wilson 9975dbd2b7bSChris Wilson GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 9985dbd2b7bSChris Wilson args->vm_id = id; 99910be98a7SChris Wilson return 0; 100010be98a7SChris Wilson 100110be98a7SChris Wilson err_put: 1002e568ac38SChris Wilson 
i915_vm_put(&ppgtt->vm); 100310be98a7SChris Wilson return err; 100410be98a7SChris Wilson } 100510be98a7SChris Wilson 100610be98a7SChris Wilson int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, 100710be98a7SChris Wilson struct drm_file *file) 100810be98a7SChris Wilson { 100910be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 101010be98a7SChris Wilson struct drm_i915_gem_vm_control *args = data; 1011e568ac38SChris Wilson struct i915_address_space *vm; 101210be98a7SChris Wilson 101310be98a7SChris Wilson if (args->flags) 101410be98a7SChris Wilson return -EINVAL; 101510be98a7SChris Wilson 101610be98a7SChris Wilson if (args->extensions) 101710be98a7SChris Wilson return -EINVAL; 101810be98a7SChris Wilson 10195dbd2b7bSChris Wilson vm = xa_erase(&file_priv->vm_xa, args->vm_id); 1020e568ac38SChris Wilson if (!vm) 102110be98a7SChris Wilson return -ENOENT; 102210be98a7SChris Wilson 1023e568ac38SChris Wilson i915_vm_put(vm); 102410be98a7SChris Wilson return 0; 102510be98a7SChris Wilson } 102610be98a7SChris Wilson 102710be98a7SChris Wilson struct context_barrier_task { 102810be98a7SChris Wilson struct i915_active base; 102910be98a7SChris Wilson void (*task)(void *data); 103010be98a7SChris Wilson void *data; 103110be98a7SChris Wilson }; 103210be98a7SChris Wilson 1033274cbf20SChris Wilson __i915_active_call 103410be98a7SChris Wilson static void cb_retire(struct i915_active *base) 103510be98a7SChris Wilson { 103610be98a7SChris Wilson struct context_barrier_task *cb = container_of(base, typeof(*cb), base); 103710be98a7SChris Wilson 103810be98a7SChris Wilson if (cb->task) 103910be98a7SChris Wilson cb->task(cb->data); 104010be98a7SChris Wilson 104110be98a7SChris Wilson i915_active_fini(&cb->base); 104210be98a7SChris Wilson kfree(cb); 104310be98a7SChris Wilson } 104410be98a7SChris Wilson 104510be98a7SChris Wilson I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); 104610be98a7SChris Wilson static int 
context_barrier_task(struct i915_gem_context *ctx, 104710be98a7SChris Wilson intel_engine_mask_t engines, 10481fe2d6f9SChris Wilson bool (*skip)(struct intel_context *ce, void *data), 104999f08d67SMaarten Lankhorst int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data), 105010be98a7SChris Wilson int (*emit)(struct i915_request *rq, void *data), 105110be98a7SChris Wilson void (*task)(void *data), 105210be98a7SChris Wilson void *data) 105310be98a7SChris Wilson { 105410be98a7SChris Wilson struct context_barrier_task *cb; 105510be98a7SChris Wilson struct i915_gem_engines_iter it; 105670c96e39SChris Wilson struct i915_gem_engines *e; 105799f08d67SMaarten Lankhorst struct i915_gem_ww_ctx ww; 105810be98a7SChris Wilson struct intel_context *ce; 105910be98a7SChris Wilson int err = 0; 106010be98a7SChris Wilson 106110be98a7SChris Wilson GEM_BUG_ON(!task); 106210be98a7SChris Wilson 106310be98a7SChris Wilson cb = kmalloc(sizeof(*cb), GFP_KERNEL); 106410be98a7SChris Wilson if (!cb) 106510be98a7SChris Wilson return -ENOMEM; 106610be98a7SChris Wilson 1067b1e3177bSChris Wilson i915_active_init(&cb->base, NULL, cb_retire); 106812c255b5SChris Wilson err = i915_active_acquire(&cb->base); 106912c255b5SChris Wilson if (err) { 107012c255b5SChris Wilson kfree(cb); 107112c255b5SChris Wilson return err; 107212c255b5SChris Wilson } 107310be98a7SChris Wilson 107470c96e39SChris Wilson e = __context_engines_await(ctx); 107570c96e39SChris Wilson if (!e) { 107670c96e39SChris Wilson i915_active_release(&cb->base); 107770c96e39SChris Wilson return -ENOENT; 107870c96e39SChris Wilson } 107970c96e39SChris Wilson 108070c96e39SChris Wilson for_each_gem_engine(ce, e, it) { 108110be98a7SChris Wilson struct i915_request *rq; 108210be98a7SChris Wilson 108310be98a7SChris Wilson if (I915_SELFTEST_ONLY(context_barrier_inject_fault & 108410be98a7SChris Wilson ce->engine->mask)) { 108510be98a7SChris Wilson err = -ENXIO; 108610be98a7SChris Wilson break; 108710be98a7SChris Wilson } 
108810be98a7SChris Wilson 10891fe2d6f9SChris Wilson if (!(ce->engine->mask & engines)) 10901fe2d6f9SChris Wilson continue; 10911fe2d6f9SChris Wilson 10921fe2d6f9SChris Wilson if (skip && skip(ce, data)) 109310be98a7SChris Wilson continue; 109410be98a7SChris Wilson 109599f08d67SMaarten Lankhorst i915_gem_ww_ctx_init(&ww, true); 109699f08d67SMaarten Lankhorst retry: 109747b08693SMaarten Lankhorst err = intel_context_pin_ww(ce, &ww); 109899f08d67SMaarten Lankhorst if (err) 109999f08d67SMaarten Lankhorst goto err; 110099f08d67SMaarten Lankhorst 110199f08d67SMaarten Lankhorst if (pin) 110299f08d67SMaarten Lankhorst err = pin(ce, &ww, data); 110399f08d67SMaarten Lankhorst if (err) 110499f08d67SMaarten Lankhorst goto err_unpin; 110599f08d67SMaarten Lankhorst 110699f08d67SMaarten Lankhorst rq = i915_request_create(ce); 110710be98a7SChris Wilson if (IS_ERR(rq)) { 110810be98a7SChris Wilson err = PTR_ERR(rq); 110999f08d67SMaarten Lankhorst goto err_unpin; 111010be98a7SChris Wilson } 111110be98a7SChris Wilson 111210be98a7SChris Wilson err = 0; 111310be98a7SChris Wilson if (emit) 111410be98a7SChris Wilson err = emit(rq, data); 111510be98a7SChris Wilson if (err == 0) 1116d19d71fcSChris Wilson err = i915_active_add_request(&cb->base, rq); 111710be98a7SChris Wilson 111810be98a7SChris Wilson i915_request_add(rq); 111999f08d67SMaarten Lankhorst err_unpin: 112099f08d67SMaarten Lankhorst intel_context_unpin(ce); 112199f08d67SMaarten Lankhorst err: 112299f08d67SMaarten Lankhorst if (err == -EDEADLK) { 112399f08d67SMaarten Lankhorst err = i915_gem_ww_ctx_backoff(&ww); 112499f08d67SMaarten Lankhorst if (!err) 112599f08d67SMaarten Lankhorst goto retry; 112699f08d67SMaarten Lankhorst } 112799f08d67SMaarten Lankhorst i915_gem_ww_ctx_fini(&ww); 112899f08d67SMaarten Lankhorst 112910be98a7SChris Wilson if (err) 113010be98a7SChris Wilson break; 113110be98a7SChris Wilson } 113270c96e39SChris Wilson i915_sw_fence_complete(&e->fence); 113310be98a7SChris Wilson 113410be98a7SChris Wilson cb->task = 
err ? NULL : task; /* caller needs to unwind instead */ 113510be98a7SChris Wilson cb->data = data; 113610be98a7SChris Wilson 113710be98a7SChris Wilson i915_active_release(&cb->base); 113810be98a7SChris Wilson 113910be98a7SChris Wilson return err; 114010be98a7SChris Wilson } 114110be98a7SChris Wilson 114210be98a7SChris Wilson static int get_ppgtt(struct drm_i915_file_private *file_priv, 114310be98a7SChris Wilson struct i915_gem_context *ctx, 114410be98a7SChris Wilson struct drm_i915_gem_context_param *args) 114510be98a7SChris Wilson { 1146e568ac38SChris Wilson struct i915_address_space *vm; 11475dbd2b7bSChris Wilson int err; 11485dbd2b7bSChris Wilson u32 id; 114910be98a7SChris Wilson 1150a4e7ccdaSChris Wilson if (!rcu_access_pointer(ctx->vm)) 115110be98a7SChris Wilson return -ENODEV; 115210be98a7SChris Wilson 1153a4e7ccdaSChris Wilson rcu_read_lock(); 115427dbae8fSChris Wilson vm = context_get_vm_rcu(ctx); 1155a4e7ccdaSChris Wilson rcu_read_unlock(); 115690211ea4SChris Wilson if (!vm) 115790211ea4SChris Wilson return -ENODEV; 115890211ea4SChris Wilson 115990211ea4SChris Wilson err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); 11605dbd2b7bSChris Wilson if (err) 116110be98a7SChris Wilson goto err_put; 116210be98a7SChris Wilson 11632850748eSChris Wilson i915_vm_open(vm); 116410be98a7SChris Wilson 11655dbd2b7bSChris Wilson GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 11665dbd2b7bSChris Wilson args->value = id; 116710be98a7SChris Wilson args->size = 0; 116810be98a7SChris Wilson 116910be98a7SChris Wilson err_put: 1170e568ac38SChris Wilson i915_vm_put(vm); 11715dbd2b7bSChris Wilson return err; 117210be98a7SChris Wilson } 117310be98a7SChris Wilson 117410be98a7SChris Wilson static void set_ppgtt_barrier(void *data) 117510be98a7SChris Wilson { 1176e568ac38SChris Wilson struct i915_address_space *old = data; 117710be98a7SChris Wilson 1178e568ac38SChris Wilson if (INTEL_GEN(old->i915) < 8) 1179e568ac38SChris Wilson 
gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); 118010be98a7SChris Wilson 11812850748eSChris Wilson i915_vm_close(old); 118210be98a7SChris Wilson } 118310be98a7SChris Wilson 118499f08d67SMaarten Lankhorst static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data) 118599f08d67SMaarten Lankhorst { 118699f08d67SMaarten Lankhorst struct i915_address_space *vm = ce->vm; 118799f08d67SMaarten Lankhorst 118899f08d67SMaarten Lankhorst if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915)) 118999f08d67SMaarten Lankhorst /* ppGTT is not part of the legacy context image */ 119047b08693SMaarten Lankhorst return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww); 119199f08d67SMaarten Lankhorst 119299f08d67SMaarten Lankhorst return 0; 119399f08d67SMaarten Lankhorst } 119499f08d67SMaarten Lankhorst 119510be98a7SChris Wilson static int emit_ppgtt_update(struct i915_request *rq, void *data) 119610be98a7SChris Wilson { 11979f3ccd40SChris Wilson struct i915_address_space *vm = rq->context->vm; 119810be98a7SChris Wilson struct intel_engine_cs *engine = rq->engine; 119910be98a7SChris Wilson u32 base = engine->mmio_base; 120010be98a7SChris Wilson u32 *cs; 120110be98a7SChris Wilson int i; 120210be98a7SChris Wilson 1203e568ac38SChris Wilson if (i915_vm_is_4lvl(vm)) { 1204ab53497bSChris Wilson struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1205b5b7bef9SMika Kuoppala const dma_addr_t pd_daddr = px_dma(ppgtt->pd); 120610be98a7SChris Wilson 120710be98a7SChris Wilson cs = intel_ring_begin(rq, 6); 120810be98a7SChris Wilson if (IS_ERR(cs)) 120910be98a7SChris Wilson return PTR_ERR(cs); 121010be98a7SChris Wilson 121110be98a7SChris Wilson *cs++ = MI_LOAD_REGISTER_IMM(2); 121210be98a7SChris Wilson 121310be98a7SChris Wilson *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); 121410be98a7SChris Wilson *cs++ = upper_32_bits(pd_daddr); 121510be98a7SChris Wilson *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); 121610be98a7SChris Wilson *cs++ = lower_32_bits(pd_daddr); 
121710be98a7SChris Wilson 121810be98a7SChris Wilson *cs++ = MI_NOOP; 121910be98a7SChris Wilson intel_ring_advance(rq, cs); 122010be98a7SChris Wilson } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { 1221ab53497bSChris Wilson struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1222191797a8SChris Wilson int err; 1223191797a8SChris Wilson 1224191797a8SChris Wilson /* Magic required to prevent forcewake errors! */ 1225191797a8SChris Wilson err = engine->emit_flush(rq, EMIT_INVALIDATE); 1226191797a8SChris Wilson if (err) 1227191797a8SChris Wilson return err; 1228e568ac38SChris Wilson 122910be98a7SChris Wilson cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); 123010be98a7SChris Wilson if (IS_ERR(cs)) 123110be98a7SChris Wilson return PTR_ERR(cs); 123210be98a7SChris Wilson 1233191797a8SChris Wilson *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; 123410be98a7SChris Wilson for (i = GEN8_3LVL_PDPES; i--; ) { 123510be98a7SChris Wilson const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 123610be98a7SChris Wilson 123710be98a7SChris Wilson *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); 123810be98a7SChris Wilson *cs++ = upper_32_bits(pd_daddr); 123910be98a7SChris Wilson *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); 124010be98a7SChris Wilson *cs++ = lower_32_bits(pd_daddr); 124110be98a7SChris Wilson } 124210be98a7SChris Wilson *cs++ = MI_NOOP; 124310be98a7SChris Wilson intel_ring_advance(rq, cs); 124410be98a7SChris Wilson } 124510be98a7SChris Wilson 124610be98a7SChris Wilson return 0; 124710be98a7SChris Wilson } 124810be98a7SChris Wilson 12491fe2d6f9SChris Wilson static bool skip_ppgtt_update(struct intel_context *ce, void *data) 12501fe2d6f9SChris Wilson { 12511fe2d6f9SChris Wilson if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) 125299f08d67SMaarten Lankhorst return !ce->state; 125399f08d67SMaarten Lankhorst else 125499f08d67SMaarten Lankhorst return !atomic_read(&ce->pin_count); 12551fe2d6f9SChris Wilson } 

/*
 * Replace the context's address space (full-ppgtt) with one previously
 * created by the client (looked up by id in file_priv->vm_xa).
 *
 * Takes ctx->mutex; on failure of the barrier task the old vm is
 * reinstated so the context is left unchanged.
 */
static int set_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm, *old;
	int err;

	if (args->size)
		return -EINVAL;

	/* Only contexts that were created with their own vm can swap it. */
	if (!rcu_access_pointer(ctx->vm))
		return -ENODEV;

	/* vm ids are 32bit; anything wider cannot name a vm in vm_xa */
	if (upper_32_bits(args->value))
		return -ENOENT;

	/*
	 * The xarray lookup is only RCU-safe; we must also take a reference
	 * before dropping rcu_read_lock, and the vm may be concurrently
	 * dying (kref already zero), in which case treat it as not found.
	 */
	rcu_read_lock();
	vm = xa_load(&file_priv->vm_xa, args->value);
	if (vm && !kref_get_unless_zero(&vm->ref))
		vm = NULL;
	rcu_read_unlock();
	if (!vm)
		return -ENOENT;

	err = mutex_lock_interruptible(&ctx->mutex);
	if (err)
		goto out;

	/* The context may have been closed while we waited for the mutex. */
	if (i915_gem_context_is_closed(ctx)) {
		err = -ENOENT;
		goto unlock;
	}

	/* No-op if the client asked for the vm already installed. */
	if (vm == rcu_access_pointer(ctx->vm))
		goto unlock;

	old = __set_ppgtt(ctx, vm);

	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
	lut_close(ctx);

	/*
	 * We need to flush any requests using the current ppgtt before
	 * we release it as the requests do not hold a reference themselves,
	 * only indirectly through the context.
	 */
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_ppgtt_update,
				   pin_ppgtt_update,
				   emit_ppgtt_update,
				   set_ppgtt_barrier,
				   old);
	if (err) {
		/* Undo the swap: put the old vm back and rebuild the cache. */
		i915_vm_close(__set_ppgtt(ctx, old));
		i915_vm_close(old);
		lut_close(ctx); /* force a rebuild of the old obj:vma cache */
	}

unlock:
	mutex_unlock(&ctx->mutex);
out:
	i915_vm_put(vm); /* drop the lookup reference taken above */
	return err;
}

/* context_apply_all() callback: apply the requested ring size to one engine. */
static int __apply_ringsize(struct intel_context *ce, void *sz)
{
	return intel_context_set_ring_size(ce, (unsigned long)sz);
}

/*
 * Set the ringbuffer size for every engine in the context.
 * The value must be a page-aligned size in (0, 128] pages.
 */
static int set_ringsize(struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
		return -ENODEV;

	if (args->size)
		return -EINVAL;

	if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
		return -EINVAL;

	if (args->value < I915_GTT_PAGE_SIZE)
		return -EINVAL;

	if (args->value > 128 * I915_GTT_PAGE_SIZE)
		return -EINVAL;

	return context_apply_all(ctx,
				 __apply_ringsize,
				 __intel_context_ring_size(args->value));
}

/*
 * context_apply_all() callback: report one engine's ring size.
 * Returning non-zero terminates the iteration, so the first engine wins.
 */
static int __get_ringsize(struct intel_context *ce, void *arg)
{
	long sz;

	sz = intel_context_get_ring_size(ce);
	GEM_BUG_ON(sz > INT_MAX);

	return sz; /* stop on first engine */
}

/* Report the context's ring size (taken from its first engine). */
static int get_ringsize(struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int sz;

	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
		return -ENODEV;

	if (args->size)
		return -EINVAL;

	sz = context_apply_all(ctx, __get_ringsize, NULL);
	if (sz < 0)
		return sz;

	args->value = sz;
	return 0;
}

/*
 * Validate a user-supplied SSEU (slice/subslice/EU) configuration against
 * the device limits of @gt and, if acceptable, fill in @context.
 * Returns 0 on success or -EINVAL on any violation.
 */
int
i915_gem_user_to_context_sseu(struct intel_gt *gt,
			      const struct drm_i915_gem_context_param_sseu *user,
			      struct intel_sseu *context)
{
	const struct sseu_dev_info *device = &gt->info.sseu;
	struct drm_i915_private *i915 = gt->i915;

	/* No zeros in any field. */
	if (!user->slice_mask || !user->subslice_mask ||
	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
		return -EINVAL;

	/* Max > min. */
	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
		return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
	 */
	if (overflows_type(user->slice_mask, context->slice_mask) ||
	    overflows_type(user->subslice_mask, context->subslice_mask) ||
	    overflows_type(user->min_eus_per_subslice,
			   context->min_eus_per_subslice) ||
	    overflows_type(user->max_eus_per_subslice,
			   context->max_eus_per_subslice))
		return -EINVAL;

	/* Check validity against hardware. */
	if (user->slice_mask & ~device->slice_mask)
		return -EINVAL;

	if (user->subslice_mask & ~device->subslice_mask[0])
		return -EINVAL;

	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
		return -EINVAL;

	context->slice_mask = user->slice_mask;
	context->subslice_mask = user->subslice_mask;
	context->min_eus_per_subslice = user->min_eus_per_subslice;
	context->max_eus_per_subslice = user->max_eus_per_subslice;

	/* Part specific restrictions. */
	if (IS_GEN(i915, 11)) {
		unsigned int hw_s = hweight8(device->slice_mask);
		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
		unsigned int req_s = hweight8(context->slice_mask);
		unsigned int req_ss = hweight8(context->subslice_mask);

		/*
		 * Only full subslice enablement is possible if more than one
		 * slice is turned on.
		 */
		if (req_s > 1 && req_ss != hw_ss_per_s)
			return -EINVAL;

		/*
		 * If more than four (SScount bitfield limit) subslices are
		 * requested then the number has to be even.
		 */
		if (req_ss > 4 && (req_ss & 1))
			return -EINVAL;

		/*
		 * If only one slice is enabled and subslice count is below the
		 * device full enablement, it must be at most half of the all
		 * available subslices.
		 */
		if (req_s == 1 && req_ss < hw_ss_per_s &&
		    req_ss > (hw_ss_per_s / 2))
			return -EINVAL;

		/* ABI restriction - VME use case only. */

		/* All slices or one slice only. */
		if (req_s != 1 && req_s != hw_s)
			return -EINVAL;

		/*
		 * Half subslices or full enablement only when one slice is
		 * enabled.
		 */
		if (req_s == 1 &&
		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
			return -EINVAL;

		/* No EU configuration changes. */
		if ((user->min_eus_per_subslice !=
		     device->max_eus_per_subslice) ||
		    (user->max_eus_per_subslice !=
		     device->max_eus_per_subslice))
			return -EINVAL;
	}

	return 0;
}

/*
 * I915_CONTEXT_PARAM_SSEU setter: copy the uAPI struct from userspace,
 * validate it and reconfigure the targeted (render) engine's SSEU state.
 */
static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	struct intel_sseu sseu;
	unsigned long lookup;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	/* Dynamic SSEU reconfiguration is only exposed on gen11. */
	if (!IS_GEN(i915, 11))
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Only render engine supports RPCS configuration. */
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	/* Report back how much of the struct we consumed. */
	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}

/* Shared state passed to the I915_CONTEXT_PARAM_ENGINES extension parsers. */
struct set_engines {
	struct i915_gem_context *ctx;
	struct i915_gem_engines *engines;
};

/*
 * I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE: replace the placeholder at
 * engine_index with a virtual engine load-balancing across the listed
 * sibling physical engines.
 */
static int
set_engines__load_balance(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct intel_engine_cs *stack[16];
	struct intel_engine_cs **siblings;
	struct intel_context *ce;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (intel_uc_uses_guc_submission(&i915->gt.uc))
		return -ENODEV; /* not implemented yet */

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->engines->num_engines);
		return -EINVAL;
	}

	/* idx came from userspace; clamp speculation after the bound check */
	idx = array_index_nospec(idx, set->engines->num_engines);
	if (set->engines->engines[idx]) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	/* Small sibling lists live on the stack; spill to the heap if larger. */
	siblings = stack;
	if (num_siblings > ARRAY_SIZE(stack)) {
		siblings = kmalloc_array(num_siblings,
					 sizeof(*siblings),
					 GFP_KERNEL);
		if (!siblings)
			return -ENOMEM;
	}

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto out_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto out_siblings;
		}
	}

	ce = intel_execlists_create_virtual(siblings, n);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out_siblings;
	}

	intel_context_set_gem(ce, set->ctx);

	/* Racy slot claim: another extension may have filled it meanwhile. */
	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
		intel_context_put(ce);
		err = -EEXIST;
		goto out_siblings;
	}

out_siblings:
	if (siblings != stack)
		kfree(siblings);

	return err;
}

/*
 * I915_CONTEXT_ENGINES_EXT_BOND: attach submit-fence bonds between a master
 * engine and the virtual engine at virtual_index, restricting which sibling
 * services bonded submissions.
 */
static int
set_engines__bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *virtual;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
			idx, set->engines->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->engines->num_engines);
	if (!set->engines->engines[idx]) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}
	virtual = set->engines->engines[idx]->engine;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(i915,
					  ci.engine_class, ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (get_user(num_bonds, &ext->num_bonds))
		return -EFAULT;

	for (n = 0; n < num_bonds; n++) {
		struct intel_engine_cs *bond;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			drm_dbg(&i915->drm,
				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
				n, ci.engine_class, ci.engine_instance);
			return -EINVAL;
		}

		/*
		 * A non-virtual engine has no siblings to choose between; and
		 * a submit fence will always be directed to the one engine.
		 */
		if (intel_engine_is_virtual(virtual)) {
			err = intel_virtual_engine_attach_bond(virtual,
							       master,
							       bond);
			if (err)
				return err;
		}
	}

	return 0;
}

/* Dispatch table for the engines-param user extensions. */
static const i915_user_extension_fn set_engines__extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
};

/*
 * I915_CONTEXT_PARAM_ENGINES setter: build a new engine map from the user
 * array (args->size == 0 restores the legacy default map), run the
 * extension chain, then atomically replace ctx->engines.
 */
static int
set_engines(struct i915_gem_context *ctx,
	    const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_context_param_engines __user *user =
		u64_to_user_ptr(args->value);
	struct set_engines set = { .ctx = ctx };
	unsigned int num_engines, n;
	u64 extensions;
	int err;

	if (!args->size) { /* switch back to legacy user_ring_map */
		if (!i915_gem_context_user_engines(ctx))
			return 0;

		set.engines = default_engines(ctx);
		if (IS_ERR(set.engines))
			return PTR_ERR(set.engines);

		goto replace;
	}

	BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
	if (args->size < sizeof(*user) ||
	    !IS_ALIGNED(args->size, sizeof(*user->engines))) {
		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
			args->size);
		return -EINVAL;
	}

	/*
	 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
	 * first 64 engines defined here.
	 */
	num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
	set.engines = alloc_engines(num_engines);
	if (!set.engines)
		return -ENOMEM;

	for (n = 0; n < num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;
		struct intel_context *ce;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			__free_engines(set.engines, n);
			return -EFAULT;
		}

		/* { INVALID, INVALID_NONE } leaves a hole in the map. */
		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
			set.engines->engines[n] = NULL;
			continue;
		}

		engine = intel_engine_lookup_user(ctx->i915,
						  ci.engine_class,
						  ci.engine_instance);
		if (!engine) {
			drm_dbg(&i915->drm,
				"Invalid engine[%d]: { class:%d, instance:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			__free_engines(set.engines, n);
			return -ENOENT;
		}

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			__free_engines(set.engines, n);
			return PTR_ERR(ce);
		}

		intel_context_set_gem(ce, ctx);

		set.engines->engines[n] = ce;
	}
	set.engines->num_engines = num_engines;

	err = -EFAULT;
	if (!get_user(extensions, &user->extensions))
		err = i915_user_extensions(u64_to_user_ptr(extensions),
					   set_engines__extensions,
					   ARRAY_SIZE(set_engines__extensions),
					   &set);
	if (err) {
		free_engines(set.engines);
		return err;
	}

replace:
	mutex_lock(&ctx->engines_mutex);
	/* Closed contexts must not gain a fresh engine map. */
	if (i915_gem_context_is_closed(ctx)) {
		mutex_unlock(&ctx->engines_mutex);
		free_engines(set.engines);
		return -ENOENT;
	}
	if (args->size)
		i915_gem_context_set_user_engines(ctx);
	else
		i915_gem_context_clear_user_engines(ctx);
	/* Swap in the new map; set.engines now holds the old one. */
	set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
	mutex_unlock(&ctx->engines_mutex);

	/* Keep track of old engine sets for kill_context() */
	engines_idle_release(ctx, set.engines);

	return 0;
}

/* Duplicate an engine map, taking a reference on each populated slot. */
static struct i915_gem_engines *
__copy_engines(struct i915_gem_engines *e)
{
	struct i915_gem_engines *copy;
	unsigned int n;

	copy = alloc_engines(e->num_engines);
	if (!copy)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < e->num_engines; n++) {
		if (e->engines[n])
			copy->engines[n] = intel_context_get(e->engines[n]);
		else
			copy->engines[n] = NULL;
	}
	copy->num_engines = n;

	return copy;
}

/*
 * I915_CONTEXT_PARAM_ENGINES getter: report the user-defined engine map
 * back to userspace (args->size == 0 queries the required size).
 */
static int
get_engines(struct i915_gem_context *ctx,
	    struct drm_i915_gem_context_param *args)
{
	struct i915_context_param_engines __user *user;
	struct i915_gem_engines *e;
	size_t n, count, size;
	int err = 0;

	err = mutex_lock_interruptible(&ctx->engines_mutex);
	if (err)
		return err;

	/* Snapshot under the mutex; copy so we can drop the lock early. */
	e = NULL;
	if (i915_gem_context_user_engines(ctx))
		e = __copy_engines(i915_gem_context_engines(ctx));
	mutex_unlock(&ctx->engines_mutex);
	if (IS_ERR_OR_NULL(e)) {
		args->size = 0;
		return PTR_ERR_OR_ZERO(e);
	}

	count = e->num_engines;

	/* Be paranoid in case we have an impedance mismatch */
	if (!check_struct_size(user, engines, count, &size)) {
		err = -EINVAL;
		goto err_free;
	}
	if (overflows_type(size, args->size)) {
		err = -EINVAL;
		goto err_free;
	}

	if (!args->size) {
		args->size = size;
		goto err_free;
	}

	if (args->size < size) {
		err = -EINVAL;
		goto err_free;
	}

	user = u64_to_user_ptr(args->value);
	if (put_user(0, &user->extensions)) {
		err = -EFAULT;
		goto err_free;
	}

	for (n = 0; n < count; n++) {
		/* Holes are reported as { INVALID, INVALID_NONE }. */
		struct i915_engine_class_instance ci = {
			.engine_class = I915_ENGINE_CLASS_INVALID,
			.engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
		};

		if (e->engines[n]) {
			ci.engine_class = e->engines[n]->engine->uabi_class;
			ci.engine_instance = e->engines[n]->engine->uabi_instance;
		}

		if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
			err = -EFAULT;
			goto err_free;
		}
	}

	args->size = size;

err_free:
	free_engines(e);
	return err;
}

/* I915_CONTEXT_PARAM_PERSISTENCE setter: thin wrapper with size check. */
static int
set_persistence(struct i915_gem_context *ctx,
		const struct drm_i915_gem_context_param *args)
{
	if (args->size)
		return -EINVAL;

	return __context_set_persistence(ctx, args->value);
}

/*
 * context_apply_all() callback: enable semaphore waits on engines that
 * support timeslicing, but only for normal-or-higher priority contexts.
 */
static int __apply_priority(struct intel_context *ce, void *arg)
{
	struct i915_gem_context *ctx = arg;

	if (!intel_engine_has_timeslices(ce->engine))
		return 0;

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
		intel_context_set_use_semaphores(ce);
	else
		intel_context_clear_use_semaphores(ce);

	return 0;
}

/*
 * I915_CONTEXT_PARAM_PRIORITY setter: validate the requested priority
 * (raising above default needs CAP_SYS_NICE) and propagate it.
 */
static int set_priority(struct i915_gem_context *ctx,
			const struct drm_i915_gem_context_param *args)
{
	s64 priority = args->value;

	if (args->size)
		return -EINVAL;

	if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
		return -ENODEV;

	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
		return -EINVAL;

	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	ctx->sched.priority = I915_USER_PRIORITY(priority);
	context_apply_all(ctx, __apply_priority, ctx);

	return 0;
}

/* Central dispatcher for DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM. */
static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		else
			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_recoverable(ctx);
		else
			i915_gem_context_clear_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = set_priority(ctx, args);
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_ppgtt(fpriv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		ret = set_persistence(ctx, args);
2045a0e04715SChris Wilson break; 2046a0e04715SChris Wilson 204788be76cdSChris Wilson case I915_CONTEXT_PARAM_RINGSIZE: 204888be76cdSChris Wilson ret = set_ringsize(ctx, args); 204988be76cdSChris Wilson break; 205088be76cdSChris Wilson 205110be98a7SChris Wilson case I915_CONTEXT_PARAM_BAN_PERIOD: 205210be98a7SChris Wilson default: 205310be98a7SChris Wilson ret = -EINVAL; 205410be98a7SChris Wilson break; 205510be98a7SChris Wilson } 205610be98a7SChris Wilson 205710be98a7SChris Wilson return ret; 205810be98a7SChris Wilson } 205910be98a7SChris Wilson 206010be98a7SChris Wilson struct create_ext { 206110be98a7SChris Wilson struct i915_gem_context *ctx; 206210be98a7SChris Wilson struct drm_i915_file_private *fpriv; 206310be98a7SChris Wilson }; 206410be98a7SChris Wilson 206510be98a7SChris Wilson static int create_setparam(struct i915_user_extension __user *ext, void *data) 206610be98a7SChris Wilson { 206710be98a7SChris Wilson struct drm_i915_gem_context_create_ext_setparam local; 206810be98a7SChris Wilson const struct create_ext *arg = data; 206910be98a7SChris Wilson 207010be98a7SChris Wilson if (copy_from_user(&local, ext, sizeof(local))) 207110be98a7SChris Wilson return -EFAULT; 207210be98a7SChris Wilson 207310be98a7SChris Wilson if (local.param.ctx_id) 207410be98a7SChris Wilson return -EINVAL; 207510be98a7SChris Wilson 207610be98a7SChris Wilson return ctx_setparam(arg->fpriv, arg->ctx, &local.param); 207710be98a7SChris Wilson } 207810be98a7SChris Wilson 207988be76cdSChris Wilson static int copy_ring_size(struct intel_context *dst, 208088be76cdSChris Wilson struct intel_context *src) 208188be76cdSChris Wilson { 208288be76cdSChris Wilson long sz; 208388be76cdSChris Wilson 208488be76cdSChris Wilson sz = intel_context_get_ring_size(src); 208588be76cdSChris Wilson if (sz < 0) 208688be76cdSChris Wilson return sz; 208788be76cdSChris Wilson 208888be76cdSChris Wilson return intel_context_set_ring_size(dst, sz); 208988be76cdSChris Wilson } 209088be76cdSChris Wilson 
209110be98a7SChris Wilson static int clone_engines(struct i915_gem_context *dst, 209210be98a7SChris Wilson struct i915_gem_context *src) 209310be98a7SChris Wilson { 209410be98a7SChris Wilson struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 209510be98a7SChris Wilson struct i915_gem_engines *clone; 209610be98a7SChris Wilson bool user_engines; 209710be98a7SChris Wilson unsigned long n; 209810be98a7SChris Wilson 209970c96e39SChris Wilson clone = alloc_engines(e->num_engines); 210010be98a7SChris Wilson if (!clone) 210110be98a7SChris Wilson goto err_unlock; 210210be98a7SChris Wilson 210310be98a7SChris Wilson for (n = 0; n < e->num_engines; n++) { 210410be98a7SChris Wilson struct intel_engine_cs *engine; 210510be98a7SChris Wilson 210610be98a7SChris Wilson if (!e->engines[n]) { 210710be98a7SChris Wilson clone->engines[n] = NULL; 210810be98a7SChris Wilson continue; 210910be98a7SChris Wilson } 211010be98a7SChris Wilson engine = e->engines[n]->engine; 211110be98a7SChris Wilson 211210be98a7SChris Wilson /* 211310be98a7SChris Wilson * Virtual engines are singletons; they can only exist 211410be98a7SChris Wilson * inside a single context, because they embed their 211510be98a7SChris Wilson * HW context... As each virtual context implies a single 211610be98a7SChris Wilson * timeline (each engine can only dequeue a single request 211710be98a7SChris Wilson * at any time), it would be surprising for two contexts 211810be98a7SChris Wilson * to use the same engine. So let's create a copy of 211910be98a7SChris Wilson * the virtual engine instead. 
212010be98a7SChris Wilson */ 212110be98a7SChris Wilson if (intel_engine_is_virtual(engine)) 212210be98a7SChris Wilson clone->engines[n] = 2123e6ba7648SChris Wilson intel_execlists_clone_virtual(engine); 212410be98a7SChris Wilson else 2125e6ba7648SChris Wilson clone->engines[n] = intel_context_create(engine); 212610be98a7SChris Wilson if (IS_ERR_OR_NULL(clone->engines[n])) { 212710be98a7SChris Wilson __free_engines(clone, n); 212810be98a7SChris Wilson goto err_unlock; 212910be98a7SChris Wilson } 2130e6ba7648SChris Wilson 2131e6ba7648SChris Wilson intel_context_set_gem(clone->engines[n], dst); 213288be76cdSChris Wilson 213388be76cdSChris Wilson /* Copy across the preferred ringsize */ 213488be76cdSChris Wilson if (copy_ring_size(clone->engines[n], e->engines[n])) { 213588be76cdSChris Wilson __free_engines(clone, n + 1); 213688be76cdSChris Wilson goto err_unlock; 213788be76cdSChris Wilson } 213810be98a7SChris Wilson } 213910be98a7SChris Wilson clone->num_engines = n; 214010be98a7SChris Wilson 214110be98a7SChris Wilson user_engines = i915_gem_context_user_engines(src); 214210be98a7SChris Wilson i915_gem_context_unlock_engines(src); 214310be98a7SChris Wilson 2144d96bb620SChris Wilson /* Serialised by constructor */ 2145130a95e9SChris Wilson engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1)); 214610be98a7SChris Wilson if (user_engines) 214710be98a7SChris Wilson i915_gem_context_set_user_engines(dst); 214810be98a7SChris Wilson else 214910be98a7SChris Wilson i915_gem_context_clear_user_engines(dst); 215010be98a7SChris Wilson return 0; 215110be98a7SChris Wilson 215210be98a7SChris Wilson err_unlock: 215310be98a7SChris Wilson i915_gem_context_unlock_engines(src); 215410be98a7SChris Wilson return -ENOMEM; 215510be98a7SChris Wilson } 215610be98a7SChris Wilson 215710be98a7SChris Wilson static int clone_flags(struct i915_gem_context *dst, 215810be98a7SChris Wilson struct i915_gem_context *src) 215910be98a7SChris Wilson { 216010be98a7SChris Wilson 
dst->user_flags = src->user_flags; 216110be98a7SChris Wilson return 0; 216210be98a7SChris Wilson } 216310be98a7SChris Wilson 216410be98a7SChris Wilson static int clone_schedattr(struct i915_gem_context *dst, 216510be98a7SChris Wilson struct i915_gem_context *src) 216610be98a7SChris Wilson { 216710be98a7SChris Wilson dst->sched = src->sched; 216810be98a7SChris Wilson return 0; 216910be98a7SChris Wilson } 217010be98a7SChris Wilson 217110be98a7SChris Wilson static int clone_sseu(struct i915_gem_context *dst, 217210be98a7SChris Wilson struct i915_gem_context *src) 217310be98a7SChris Wilson { 217410be98a7SChris Wilson struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 217510be98a7SChris Wilson struct i915_gem_engines *clone; 217610be98a7SChris Wilson unsigned long n; 217710be98a7SChris Wilson int err; 217810be98a7SChris Wilson 2179d96bb620SChris Wilson /* no locking required; sole access under constructor*/ 2180d96bb620SChris Wilson clone = __context_engines_static(dst); 218110be98a7SChris Wilson if (e->num_engines != clone->num_engines) { 218210be98a7SChris Wilson err = -EINVAL; 218310be98a7SChris Wilson goto unlock; 218410be98a7SChris Wilson } 218510be98a7SChris Wilson 218610be98a7SChris Wilson for (n = 0; n < e->num_engines; n++) { 218710be98a7SChris Wilson struct intel_context *ce = e->engines[n]; 218810be98a7SChris Wilson 218910be98a7SChris Wilson if (clone->engines[n]->engine->class != ce->engine->class) { 219010be98a7SChris Wilson /* Must have compatible engine maps! 
*/ 219110be98a7SChris Wilson err = -EINVAL; 219210be98a7SChris Wilson goto unlock; 219310be98a7SChris Wilson } 219410be98a7SChris Wilson 219510be98a7SChris Wilson /* serialises with set_sseu */ 219610be98a7SChris Wilson err = intel_context_lock_pinned(ce); 219710be98a7SChris Wilson if (err) 219810be98a7SChris Wilson goto unlock; 219910be98a7SChris Wilson 220010be98a7SChris Wilson clone->engines[n]->sseu = ce->sseu; 220110be98a7SChris Wilson intel_context_unlock_pinned(ce); 220210be98a7SChris Wilson } 220310be98a7SChris Wilson 220410be98a7SChris Wilson err = 0; 220510be98a7SChris Wilson unlock: 220610be98a7SChris Wilson i915_gem_context_unlock_engines(src); 220710be98a7SChris Wilson return err; 220810be98a7SChris Wilson } 220910be98a7SChris Wilson 221010be98a7SChris Wilson static int clone_timeline(struct i915_gem_context *dst, 221110be98a7SChris Wilson struct i915_gem_context *src) 221210be98a7SChris Wilson { 221375d0a7f3SChris Wilson if (src->timeline) 221475d0a7f3SChris Wilson __assign_timeline(dst, src->timeline); 221510be98a7SChris Wilson 221610be98a7SChris Wilson return 0; 221710be98a7SChris Wilson } 221810be98a7SChris Wilson 221910be98a7SChris Wilson static int clone_vm(struct i915_gem_context *dst, 222010be98a7SChris Wilson struct i915_gem_context *src) 222110be98a7SChris Wilson { 2222e568ac38SChris Wilson struct i915_address_space *vm; 2223a4e7ccdaSChris Wilson int err = 0; 222410be98a7SChris Wilson 222527dbae8fSChris Wilson if (!rcu_access_pointer(src->vm)) 222627dbae8fSChris Wilson return 0; 222727dbae8fSChris Wilson 222810be98a7SChris Wilson rcu_read_lock(); 222927dbae8fSChris Wilson vm = context_get_vm_rcu(src); 223010be98a7SChris Wilson rcu_read_unlock(); 223110be98a7SChris Wilson 2232a4e7ccdaSChris Wilson if (!mutex_lock_interruptible(&dst->mutex)) { 2233e568ac38SChris Wilson __assign_ppgtt(dst, vm); 2234a4e7ccdaSChris Wilson mutex_unlock(&dst->mutex); 2235a4e7ccdaSChris Wilson } else { 2236a4e7ccdaSChris Wilson err = -EINTR; 2237a4e7ccdaSChris Wilson 
} 223810be98a7SChris Wilson 223927dbae8fSChris Wilson i915_vm_put(vm); 2240a4e7ccdaSChris Wilson return err; 224110be98a7SChris Wilson } 224210be98a7SChris Wilson 224310be98a7SChris Wilson static int create_clone(struct i915_user_extension __user *ext, void *data) 224410be98a7SChris Wilson { 224510be98a7SChris Wilson static int (* const fn[])(struct i915_gem_context *dst, 224610be98a7SChris Wilson struct i915_gem_context *src) = { 224710be98a7SChris Wilson #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y 224810be98a7SChris Wilson MAP(ENGINES, clone_engines), 224910be98a7SChris Wilson MAP(FLAGS, clone_flags), 225010be98a7SChris Wilson MAP(SCHEDATTR, clone_schedattr), 225110be98a7SChris Wilson MAP(SSEU, clone_sseu), 225210be98a7SChris Wilson MAP(TIMELINE, clone_timeline), 225310be98a7SChris Wilson MAP(VM, clone_vm), 225410be98a7SChris Wilson #undef MAP 225510be98a7SChris Wilson }; 225610be98a7SChris Wilson struct drm_i915_gem_context_create_ext_clone local; 225710be98a7SChris Wilson const struct create_ext *arg = data; 225810be98a7SChris Wilson struct i915_gem_context *dst = arg->ctx; 225910be98a7SChris Wilson struct i915_gem_context *src; 226010be98a7SChris Wilson int err, bit; 226110be98a7SChris Wilson 226210be98a7SChris Wilson if (copy_from_user(&local, ext, sizeof(local))) 226310be98a7SChris Wilson return -EFAULT; 226410be98a7SChris Wilson 226510be98a7SChris Wilson BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) != 226610be98a7SChris Wilson I915_CONTEXT_CLONE_UNKNOWN); 226710be98a7SChris Wilson 226810be98a7SChris Wilson if (local.flags & I915_CONTEXT_CLONE_UNKNOWN) 226910be98a7SChris Wilson return -EINVAL; 227010be98a7SChris Wilson 227110be98a7SChris Wilson if (local.rsvd) 227210be98a7SChris Wilson return -EINVAL; 227310be98a7SChris Wilson 227410be98a7SChris Wilson rcu_read_lock(); 227510be98a7SChris Wilson src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id); 227610be98a7SChris Wilson rcu_read_unlock(); 227710be98a7SChris 
Wilson if (!src) 227810be98a7SChris Wilson return -ENOENT; 227910be98a7SChris Wilson 228010be98a7SChris Wilson GEM_BUG_ON(src == dst); 228110be98a7SChris Wilson 228210be98a7SChris Wilson for (bit = 0; bit < ARRAY_SIZE(fn); bit++) { 228310be98a7SChris Wilson if (!(local.flags & BIT(bit))) 228410be98a7SChris Wilson continue; 228510be98a7SChris Wilson 228610be98a7SChris Wilson err = fn[bit](dst, src); 228710be98a7SChris Wilson if (err) 228810be98a7SChris Wilson return err; 228910be98a7SChris Wilson } 229010be98a7SChris Wilson 229110be98a7SChris Wilson return 0; 229210be98a7SChris Wilson } 229310be98a7SChris Wilson 229410be98a7SChris Wilson static const i915_user_extension_fn create_extensions[] = { 229510be98a7SChris Wilson [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, 229610be98a7SChris Wilson [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone, 229710be98a7SChris Wilson }; 229810be98a7SChris Wilson 229910be98a7SChris Wilson static bool client_is_banned(struct drm_i915_file_private *file_priv) 230010be98a7SChris Wilson { 230110be98a7SChris Wilson return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; 230210be98a7SChris Wilson } 230310be98a7SChris Wilson 230410be98a7SChris Wilson int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 230510be98a7SChris Wilson struct drm_file *file) 230610be98a7SChris Wilson { 230710be98a7SChris Wilson struct drm_i915_private *i915 = to_i915(dev); 230810be98a7SChris Wilson struct drm_i915_gem_context_create_ext *args = data; 230910be98a7SChris Wilson struct create_ext ext_data; 231010be98a7SChris Wilson int ret; 2311c100777cSTvrtko Ursulin u32 id; 231210be98a7SChris Wilson 231310be98a7SChris Wilson if (!DRIVER_CAPS(i915)->has_logical_contexts) 231410be98a7SChris Wilson return -ENODEV; 231510be98a7SChris Wilson 231610be98a7SChris Wilson if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) 231710be98a7SChris Wilson return -EINVAL; 231810be98a7SChris Wilson 2319cb823ed9SChris Wilson ret = 
intel_gt_terminally_wedged(&i915->gt); 232010be98a7SChris Wilson if (ret) 232110be98a7SChris Wilson return ret; 232210be98a7SChris Wilson 232310be98a7SChris Wilson ext_data.fpriv = file->driver_priv; 232410be98a7SChris Wilson if (client_is_banned(ext_data.fpriv)) { 2325baa89ba3SWambui Karuga drm_dbg(&i915->drm, 2326baa89ba3SWambui Karuga "client %s[%d] banned from creating ctx\n", 2327ba16a48aSTvrtko Ursulin current->comm, task_pid_nr(current)); 232810be98a7SChris Wilson return -EIO; 232910be98a7SChris Wilson } 233010be98a7SChris Wilson 233110be98a7SChris Wilson ext_data.ctx = i915_gem_create_context(i915, args->flags); 233210be98a7SChris Wilson if (IS_ERR(ext_data.ctx)) 233310be98a7SChris Wilson return PTR_ERR(ext_data.ctx); 233410be98a7SChris Wilson 233510be98a7SChris Wilson if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { 233610be98a7SChris Wilson ret = i915_user_extensions(u64_to_user_ptr(args->extensions), 233710be98a7SChris Wilson create_extensions, 233810be98a7SChris Wilson ARRAY_SIZE(create_extensions), 233910be98a7SChris Wilson &ext_data); 234010be98a7SChris Wilson if (ret) 234110be98a7SChris Wilson goto err_ctx; 234210be98a7SChris Wilson } 234310be98a7SChris Wilson 2344c100777cSTvrtko Ursulin ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id); 234510be98a7SChris Wilson if (ret < 0) 234610be98a7SChris Wilson goto err_ctx; 234710be98a7SChris Wilson 2348c100777cSTvrtko Ursulin args->ctx_id = id; 2349baa89ba3SWambui Karuga drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id); 235010be98a7SChris Wilson 235110be98a7SChris Wilson return 0; 235210be98a7SChris Wilson 235310be98a7SChris Wilson err_ctx: 235410be98a7SChris Wilson context_close(ext_data.ctx); 235510be98a7SChris Wilson return ret; 235610be98a7SChris Wilson } 235710be98a7SChris Wilson 235810be98a7SChris Wilson int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 235910be98a7SChris Wilson struct drm_file *file) 236010be98a7SChris Wilson { 
236110be98a7SChris Wilson struct drm_i915_gem_context_destroy *args = data; 236210be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 236310be98a7SChris Wilson struct i915_gem_context *ctx; 236410be98a7SChris Wilson 236510be98a7SChris Wilson if (args->pad != 0) 236610be98a7SChris Wilson return -EINVAL; 236710be98a7SChris Wilson 236810be98a7SChris Wilson if (!args->ctx_id) 236910be98a7SChris Wilson return -ENOENT; 237010be98a7SChris Wilson 2371c100777cSTvrtko Ursulin ctx = xa_erase(&file_priv->context_xa, args->ctx_id); 237210be98a7SChris Wilson if (!ctx) 237310be98a7SChris Wilson return -ENOENT; 237410be98a7SChris Wilson 237510be98a7SChris Wilson context_close(ctx); 237610be98a7SChris Wilson return 0; 237710be98a7SChris Wilson } 237810be98a7SChris Wilson 237910be98a7SChris Wilson static int get_sseu(struct i915_gem_context *ctx, 238010be98a7SChris Wilson struct drm_i915_gem_context_param *args) 238110be98a7SChris Wilson { 238210be98a7SChris Wilson struct drm_i915_gem_context_param_sseu user_sseu; 238310be98a7SChris Wilson struct intel_context *ce; 238410be98a7SChris Wilson unsigned long lookup; 238510be98a7SChris Wilson int err; 238610be98a7SChris Wilson 238710be98a7SChris Wilson if (args->size == 0) 238810be98a7SChris Wilson goto out; 238910be98a7SChris Wilson else if (args->size < sizeof(user_sseu)) 239010be98a7SChris Wilson return -EINVAL; 239110be98a7SChris Wilson 239210be98a7SChris Wilson if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 239310be98a7SChris Wilson sizeof(user_sseu))) 239410be98a7SChris Wilson return -EFAULT; 239510be98a7SChris Wilson 239610be98a7SChris Wilson if (user_sseu.rsvd) 239710be98a7SChris Wilson return -EINVAL; 239810be98a7SChris Wilson 239910be98a7SChris Wilson if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 240010be98a7SChris Wilson return -EINVAL; 240110be98a7SChris Wilson 240210be98a7SChris Wilson lookup = 0; 240310be98a7SChris Wilson if (user_sseu.flags & 
I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 240410be98a7SChris Wilson lookup |= LOOKUP_USER_INDEX; 240510be98a7SChris Wilson 240610be98a7SChris Wilson ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 240710be98a7SChris Wilson if (IS_ERR(ce)) 240810be98a7SChris Wilson return PTR_ERR(ce); 240910be98a7SChris Wilson 241010be98a7SChris Wilson err = intel_context_lock_pinned(ce); /* serialises with set_sseu */ 241110be98a7SChris Wilson if (err) { 241210be98a7SChris Wilson intel_context_put(ce); 241310be98a7SChris Wilson return err; 241410be98a7SChris Wilson } 241510be98a7SChris Wilson 241610be98a7SChris Wilson user_sseu.slice_mask = ce->sseu.slice_mask; 241710be98a7SChris Wilson user_sseu.subslice_mask = ce->sseu.subslice_mask; 241810be98a7SChris Wilson user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; 241910be98a7SChris Wilson user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; 242010be98a7SChris Wilson 242110be98a7SChris Wilson intel_context_unlock_pinned(ce); 242210be98a7SChris Wilson intel_context_put(ce); 242310be98a7SChris Wilson 242410be98a7SChris Wilson if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, 242510be98a7SChris Wilson sizeof(user_sseu))) 242610be98a7SChris Wilson return -EFAULT; 242710be98a7SChris Wilson 242810be98a7SChris Wilson out: 242910be98a7SChris Wilson args->size = sizeof(user_sseu); 243010be98a7SChris Wilson 243110be98a7SChris Wilson return 0; 243210be98a7SChris Wilson } 243310be98a7SChris Wilson 243410be98a7SChris Wilson int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 243510be98a7SChris Wilson struct drm_file *file) 243610be98a7SChris Wilson { 243710be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 243810be98a7SChris Wilson struct drm_i915_gem_context_param *args = data; 243910be98a7SChris Wilson struct i915_gem_context *ctx; 244010be98a7SChris Wilson int ret = 0; 244110be98a7SChris Wilson 244210be98a7SChris Wilson ctx = 
i915_gem_context_lookup(file_priv, args->ctx_id); 244310be98a7SChris Wilson if (!ctx) 244410be98a7SChris Wilson return -ENOENT; 244510be98a7SChris Wilson 244610be98a7SChris Wilson switch (args->param) { 244710be98a7SChris Wilson case I915_CONTEXT_PARAM_NO_ZEROMAP: 244810be98a7SChris Wilson args->size = 0; 244910be98a7SChris Wilson args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 245010be98a7SChris Wilson break; 245110be98a7SChris Wilson 245210be98a7SChris Wilson case I915_CONTEXT_PARAM_GTT_SIZE: 245310be98a7SChris Wilson args->size = 0; 2454a4e7ccdaSChris Wilson rcu_read_lock(); 2455a4e7ccdaSChris Wilson if (rcu_access_pointer(ctx->vm)) 2456a4e7ccdaSChris Wilson args->value = rcu_dereference(ctx->vm)->total; 245710be98a7SChris Wilson else 245810be98a7SChris Wilson args->value = to_i915(dev)->ggtt.vm.total; 2459a4e7ccdaSChris Wilson rcu_read_unlock(); 246010be98a7SChris Wilson break; 246110be98a7SChris Wilson 246210be98a7SChris Wilson case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 246310be98a7SChris Wilson args->size = 0; 246410be98a7SChris Wilson args->value = i915_gem_context_no_error_capture(ctx); 246510be98a7SChris Wilson break; 246610be98a7SChris Wilson 246710be98a7SChris Wilson case I915_CONTEXT_PARAM_BANNABLE: 246810be98a7SChris Wilson args->size = 0; 246910be98a7SChris Wilson args->value = i915_gem_context_is_bannable(ctx); 247010be98a7SChris Wilson break; 247110be98a7SChris Wilson 247210be98a7SChris Wilson case I915_CONTEXT_PARAM_RECOVERABLE: 247310be98a7SChris Wilson args->size = 0; 247410be98a7SChris Wilson args->value = i915_gem_context_is_recoverable(ctx); 247510be98a7SChris Wilson break; 247610be98a7SChris Wilson 247710be98a7SChris Wilson case I915_CONTEXT_PARAM_PRIORITY: 247810be98a7SChris Wilson args->size = 0; 247910be98a7SChris Wilson args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT; 248010be98a7SChris Wilson break; 248110be98a7SChris Wilson 248210be98a7SChris Wilson case I915_CONTEXT_PARAM_SSEU: 248310be98a7SChris Wilson ret 
= get_sseu(ctx, args); 248410be98a7SChris Wilson break; 248510be98a7SChris Wilson 248610be98a7SChris Wilson case I915_CONTEXT_PARAM_VM: 248710be98a7SChris Wilson ret = get_ppgtt(file_priv, ctx, args); 248810be98a7SChris Wilson break; 248910be98a7SChris Wilson 249010be98a7SChris Wilson case I915_CONTEXT_PARAM_ENGINES: 249110be98a7SChris Wilson ret = get_engines(ctx, args); 249210be98a7SChris Wilson break; 249310be98a7SChris Wilson 2494a0e04715SChris Wilson case I915_CONTEXT_PARAM_PERSISTENCE: 2495a0e04715SChris Wilson args->size = 0; 2496a0e04715SChris Wilson args->value = i915_gem_context_is_persistent(ctx); 2497a0e04715SChris Wilson break; 2498a0e04715SChris Wilson 249988be76cdSChris Wilson case I915_CONTEXT_PARAM_RINGSIZE: 250088be76cdSChris Wilson ret = get_ringsize(ctx, args); 250188be76cdSChris Wilson break; 250288be76cdSChris Wilson 250310be98a7SChris Wilson case I915_CONTEXT_PARAM_BAN_PERIOD: 250410be98a7SChris Wilson default: 250510be98a7SChris Wilson ret = -EINVAL; 250610be98a7SChris Wilson break; 250710be98a7SChris Wilson } 250810be98a7SChris Wilson 250910be98a7SChris Wilson i915_gem_context_put(ctx); 251010be98a7SChris Wilson return ret; 251110be98a7SChris Wilson } 251210be98a7SChris Wilson 251310be98a7SChris Wilson int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 251410be98a7SChris Wilson struct drm_file *file) 251510be98a7SChris Wilson { 251610be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 251710be98a7SChris Wilson struct drm_i915_gem_context_param *args = data; 251810be98a7SChris Wilson struct i915_gem_context *ctx; 251910be98a7SChris Wilson int ret; 252010be98a7SChris Wilson 252110be98a7SChris Wilson ctx = i915_gem_context_lookup(file_priv, args->ctx_id); 252210be98a7SChris Wilson if (!ctx) 252310be98a7SChris Wilson return -ENOENT; 252410be98a7SChris Wilson 252510be98a7SChris Wilson ret = ctx_setparam(file_priv, ctx, args); 252610be98a7SChris Wilson 252710be98a7SChris Wilson 
i915_gem_context_put(ctx); 252810be98a7SChris Wilson return ret; 252910be98a7SChris Wilson } 253010be98a7SChris Wilson 253110be98a7SChris Wilson int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, 253210be98a7SChris Wilson void *data, struct drm_file *file) 253310be98a7SChris Wilson { 2534a4e7ccdaSChris Wilson struct drm_i915_private *i915 = to_i915(dev); 253510be98a7SChris Wilson struct drm_i915_reset_stats *args = data; 253610be98a7SChris Wilson struct i915_gem_context *ctx; 253710be98a7SChris Wilson int ret; 253810be98a7SChris Wilson 253910be98a7SChris Wilson if (args->flags || args->pad) 254010be98a7SChris Wilson return -EINVAL; 254110be98a7SChris Wilson 254210be98a7SChris Wilson ret = -ENOENT; 254310be98a7SChris Wilson rcu_read_lock(); 254410be98a7SChris Wilson ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id); 254510be98a7SChris Wilson if (!ctx) 254610be98a7SChris Wilson goto out; 254710be98a7SChris Wilson 254810be98a7SChris Wilson /* 254910be98a7SChris Wilson * We opt for unserialised reads here. This may result in tearing 255010be98a7SChris Wilson * in the extremely unlikely event of a GPU hang on this context 255110be98a7SChris Wilson * as we are querying them. If we need that extra layer of protection, 255210be98a7SChris Wilson * we should wrap the hangstats with a seqlock. 
255310be98a7SChris Wilson */ 255410be98a7SChris Wilson 255510be98a7SChris Wilson if (capable(CAP_SYS_ADMIN)) 2556a4e7ccdaSChris Wilson args->reset_count = i915_reset_count(&i915->gpu_error); 255710be98a7SChris Wilson else 255810be98a7SChris Wilson args->reset_count = 0; 255910be98a7SChris Wilson 256010be98a7SChris Wilson args->batch_active = atomic_read(&ctx->guilty_count); 256110be98a7SChris Wilson args->batch_pending = atomic_read(&ctx->active_count); 256210be98a7SChris Wilson 256310be98a7SChris Wilson ret = 0; 256410be98a7SChris Wilson out: 256510be98a7SChris Wilson rcu_read_unlock(); 256610be98a7SChris Wilson return ret; 256710be98a7SChris Wilson } 256810be98a7SChris Wilson 256910be98a7SChris Wilson /* GEM context-engines iterator: for_each_gem_engine() */ 257010be98a7SChris Wilson struct intel_context * 257110be98a7SChris Wilson i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) 257210be98a7SChris Wilson { 257310be98a7SChris Wilson const struct i915_gem_engines *e = it->engines; 257410be98a7SChris Wilson struct intel_context *ctx; 257510be98a7SChris Wilson 2576130a95e9SChris Wilson if (unlikely(!e)) 2577130a95e9SChris Wilson return NULL; 2578130a95e9SChris Wilson 257910be98a7SChris Wilson do { 258010be98a7SChris Wilson if (it->idx >= e->num_engines) 258110be98a7SChris Wilson return NULL; 258210be98a7SChris Wilson 258310be98a7SChris Wilson ctx = e->engines[it->idx++]; 258410be98a7SChris Wilson } while (!ctx); 258510be98a7SChris Wilson 258610be98a7SChris Wilson return ctx; 258710be98a7SChris Wilson } 258810be98a7SChris Wilson 258910be98a7SChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 259010be98a7SChris Wilson #include "selftests/mock_context.c" 259110be98a7SChris Wilson #include "selftests/i915_gem_context.c" 259210be98a7SChris Wilson #endif 259310be98a7SChris Wilson 259410be98a7SChris Wilson static void i915_global_gem_context_shrink(void) 259510be98a7SChris Wilson { 259610be98a7SChris Wilson kmem_cache_shrink(global.slab_luts); 
259710be98a7SChris Wilson } 259810be98a7SChris Wilson 259910be98a7SChris Wilson static void i915_global_gem_context_exit(void) 260010be98a7SChris Wilson { 260110be98a7SChris Wilson kmem_cache_destroy(global.slab_luts); 260210be98a7SChris Wilson } 260310be98a7SChris Wilson 260410be98a7SChris Wilson static struct i915_global_gem_context global = { { 260510be98a7SChris Wilson .shrink = i915_global_gem_context_shrink, 260610be98a7SChris Wilson .exit = i915_global_gem_context_exit, 260710be98a7SChris Wilson } }; 260810be98a7SChris Wilson 260910be98a7SChris Wilson int __init i915_global_gem_context_init(void) 261010be98a7SChris Wilson { 261110be98a7SChris Wilson global.slab_luts = KMEM_CACHE(i915_lut_handle, 0); 261210be98a7SChris Wilson if (!global.slab_luts) 261310be98a7SChris Wilson return -ENOMEM; 261410be98a7SChris Wilson 261510be98a7SChris Wilson i915_global_register(&global.base); 261610be98a7SChris Wilson return 0; 261710be98a7SChris Wilson } 2618