/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * from RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client.
 * These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU.
The GPU has loaded its state already and has stored away the gtt 5610be98a7SChris Wilson * offset of the BO. The GPU is not actively referencing the data at this 5710be98a7SChris Wilson * offset, but it will on the next context switch. The only way to avoid this 5810be98a7SChris Wilson * is to do a GPU reset. 5910be98a7SChris Wilson * 6010be98a7SChris Wilson * An "active context' is one which was previously the "current context" and is 6110be98a7SChris Wilson * on the active list waiting for the next context switch to occur. Until this 6210be98a7SChris Wilson * happens, the object must remain at the same gtt offset. It is therefore 6310be98a7SChris Wilson * possible to destroy a context, but it is still active. 6410be98a7SChris Wilson * 6510be98a7SChris Wilson */ 6610be98a7SChris Wilson 67e9b67ec2SJani Nikula #include <linux/highmem.h> 6810be98a7SChris Wilson #include <linux/log2.h> 6910be98a7SChris Wilson #include <linux/nospec.h> 7010be98a7SChris Wilson 715f2ec909SJani Nikula #include <drm/drm_cache.h> 7200dae4d3SJason Ekstrand #include <drm/drm_syncobj.h> 7300dae4d3SJason Ekstrand 742c86e55dSMatthew Auld #include "gt/gen6_ppgtt.h" 759f3ccd40SChris Wilson #include "gt/intel_context.h" 7688be76cdSChris Wilson #include "gt/intel_context_param.h" 772e0986a5SChris Wilson #include "gt/intel_engine_heartbeat.h" 78750e76b4SChris Wilson #include "gt/intel_engine_user.h" 7945233ab2SChris Wilson #include "gt/intel_gpu_commands.h" 802871ea85SChris Wilson #include "gt/intel_ring.h" 8110be98a7SChris Wilson 82d3ac8d42SDaniele Ceraolo Spurio #include "pxp/intel_pxp.h" 83d3ac8d42SDaniele Ceraolo Spurio 845472b3f2SJani Nikula #include "i915_file_private.h" 8510be98a7SChris Wilson #include "i915_gem_context.h" 8610be98a7SChris Wilson #include "i915_trace.h" 8710be98a7SChris Wilson #include "i915_user_extensions.h" 8810be98a7SChris Wilson 8910be98a7SChris Wilson #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 9010be98a7SChris Wilson 91a6270d1dSDaniel Vetter static struct 
kmem_cache *slab_luts; /* backing slab for struct i915_lut_handle objects */

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	/*
	 * NOTE(review): returning a void expression from a void function is a
	 * GCC extension (ISO C forbids it); kept byte-identical here.
	 */
	return kmem_cache_free(slab_luts, lut);
}

/*
 * Tear down every handle -> vma lookup entry (LUT) that this context owns.
 *
 * Walks ctx->handles_vma under lut_mutex + RCU, unlinks the matching
 * i915_lut_handle from each object's lut_list, removes the radix-tree slot,
 * and drops the references the LUT entry held on the vma and object.
 */
static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	mutex_lock(&ctx->lut_mutex);
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		/* Skip objects already on their way to destruction. */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		/* Find and unlink the LUT entry for (ctx, handle). */
		spin_lock(&obj->lut_lock);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);

		/* Only if we found a match above (iterator did not run off the list). */
		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			i915_vma_close(vma);
			/* Drop the reference the LUT entry held on the object. */
			i915_gem_object_put(obj);
		}

		/* Balance the kref_get_unless_zero() above. */
		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
	mutex_unlock(&ctx->lut_mutex);
}

/*
 * Translate a user-supplied engine descriptor into an intel_context.
 *
 * LOOKUP_USER_INDEX in @flags must match whether the context carries a
 * user-defined engine map: with a user map, @ci->engine_instance is a direct
 * index into that map; otherwise the (class, instance) pair is resolved to a
 * physical engine and its legacy index is used.
 */
static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}

/*
 * Validate a userspace priority request: scheduler support, legal range,
 * and CAP_SYS_NICE for priorities above the default.
 */
static int validate_priority(struct drm_i915_private *i915,
			     const struct drm_i915_gem_context_param *args)
{
	s64 priority = args->value;

	if (args->size)
		return -EINVAL;

	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
		return -ENODEV;

	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
		return -EINVAL;

	/* Raising priority above the default is a privileged operation. */
	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}

/*
 * Free a proto-context and every resource it owns: the PXP runtime-pm
 * wakeref, the VM reference, and the user-engine array (including each
 * entry's siblings array).
 */
static void proto_context_close(struct drm_i915_private *i915,
				struct i915_gem_proto_context *pc)
{
	int i;

	if (pc->pxp_wakeref)
		intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
	if (pc->vm)
		i915_vm_put(pc->vm);
	if (pc->user_engines) {
		for (i = 0; i < pc->num_user_engines; i++)
			kfree(pc->user_engines[i].siblings);
		kfree(pc->user_engines);
	}
	kfree(pc);
}

/*
 * Set or clear UCONTEXT_PERSISTENCE on a proto-context, checking that the
 * hardware/driver can actually honour the requested mode.
 */
static int proto_context_set_persistence(struct drm_i915_private *i915,
					 struct i915_gem_proto_context *pc,
					 bool persist)
{
	if (persist) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!i915->params.enable_hangcheck)
			return -EINVAL;

		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(i915)))
			return -ENODEV;

		pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
	}

	return 0;
}

/*
 * Enable or disable protected-content (PXP) usage on a proto-context.
 * Protected contexts must be bannable and non-recoverable, and require the
 * PXP session (and hence the device) to be up.
 */
static int proto_context_set_protected(struct drm_i915_private *i915,
				       struct i915_gem_proto_context *pc,
				       bool protected)
{
	int ret = 0;

	if (!protected) {
		pc->uses_protected_content = false;
	} else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
		ret = -ENODEV;
	} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
		   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
		ret = -EPERM;
	} else {
		pc->uses_protected_content = true;

		/*
		 * protected context usage requires the PXP session to be up,
		 * which in turn requires the device to be active.
		 */
		pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);

		if (!intel_pxp_is_active(&to_gt(i915)->pxp))
			ret = intel_pxp_start(&to_gt(i915)->pxp);
	}

	return ret;
}

/*
 * Allocate a proto-context with default flags: bannable + recoverable,
 * persistent when hangcheck is enabled, and normal scheduling priority.
 * SINGLE_TIMELINE is only honoured on execlists-capable hardware.
 */
static struct i915_gem_proto_context *
proto_context_create(struct drm_i915_private *i915, unsigned int flags)
{
	struct i915_gem_proto_context *pc, *err;

	pc = kzalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return ERR_PTR(-ENOMEM);

	pc->num_user_engines = -1;
	pc->user_engines = NULL;
	pc->user_flags = BIT(UCONTEXT_BANNABLE) |
			 BIT(UCONTEXT_RECOVERABLE);
	if (i915->params.enable_hangcheck)
		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
	pc->sched.priority = I915_PRIORITY_NORMAL;

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
		if (!HAS_EXECLISTS(i915)) {
			err = ERR_PTR(-EINVAL);
			goto proto_close;
		}
		pc->single_timeline = true;
	}

	return pc;

proto_close:
	proto_context_close(i915, pc);
	return err;
}

/*
 * Reserve an id in context_xa (with a NULL placeholder) and store the
 * proto-context in proto_context_xa under the same id. Caller holds
 * proto_context_lock.
 */
static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
					 struct i915_gem_proto_context *pc,
					 u32 *id)
{
	int ret;
	void *old;

	lockdep_assert_held(&fpriv->proto_context_lock);

	ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
	if (xa_is_err(old)) {
		/* Roll back the context_xa reservation on failure. */
		xa_erase(&fpriv->context_xa, *id);
		return xa_err(old);
	}
	WARN_ON(old);

	return 0;
}

/* Locked wrapper around proto_context_register_locked(). */
static int proto_context_register(struct drm_i915_file_private *fpriv,
				  struct i915_gem_proto_context *pc,
				  u32 *id)
{
	int ret;

	mutex_lock(&fpriv->proto_context_lock);
	ret = proto_context_register_locked(fpriv, pc, id);
	mutex_unlock(&fpriv->proto_context_lock);

	return ret;
}

/*
 * Look up a VM by user-visible id, taking a reference under vm_xa's lock.
 * Returns NULL if no VM is registered under @id.
 */
static struct i915_address_space *
i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_address_space *vm;

	xa_lock(&file_priv->vm_xa);
	vm = xa_load(&file_priv->vm_xa, id);
	if (vm)
		kref_get(&vm->ref);
	xa_unlock(&file_priv->vm_xa);

	return vm;
}

/*
 * SET_CONTEXT_PARAM(VM) on a proto-context: replace its VM with the one
 * registered under args->value. Requires full PPGTT support.
 */
static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
			    struct i915_gem_proto_context *pc,
			    const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->dev_priv;
	struct i915_address_space *vm;

	if (args->size)
		return -EINVAL;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	/* VM ids are 32-bit; anything larger cannot name a VM. */
	if (upper_32_bits(args->value))
		return -ENOENT;

	vm = i915_gem_vm_lookup(fpriv, args->value);
	if (!vm)
		return -ENOENT;

	if (pc->vm)
		i915_vm_put(pc->vm);
	pc->vm = vm;

	return 0;
}

/* Scratch state shared by the SET_ENGINES user-extension handlers below. */
struct set_proto_ctx_engines {
	struct drm_i915_private *i915;
	unsigned num_engines;
	struct i915_gem_proto_engine *engines;
};

/*
 * I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE handler: fill one engine slot with
 * either a single physical engine or a load-balanced virtual engine built
 * from the user-supplied sibling list.
 */
static int
set_proto_ctx_engines_balance(struct
i915_user_extension __user *base,
			      void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct intel_engine_cs **siblings;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	/* Clamp the user index against speculative out-of-bounds access. */
	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	/* Reserved fields must be zero. */
	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	if (num_siblings == 0)
		return 0;

	siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
	if (!siblings)
		return -ENOMEM;

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto err_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto err_siblings;
		}
	}

	/* A single sibling degenerates to a plain physical engine. */
	if (num_siblings == 1) {
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
		set->engines[idx].engine = siblings[0];
		kfree(siblings);
	} else {
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
		set->engines[idx].num_siblings = num_siblings;
		set->engines[idx].siblings = siblings;
	}

	return 0;

err_siblings:
	kfree(siblings);

	return err;
}

/*
 * I915_CONTEXT_ENGINES_EXT_BOND handler: validate a bonding request between
 * a previously configured physical engine slot and a master engine. Only
 * supported on pre-gen12 platforms (plus TGL/RKL/ADL-S) without GuC
 * submission.
 */
static int
set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
	    !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
		drm_dbg(&i915->drm,
			"Bonding not supported on this platform\n");
		return -ENODEV;
	}

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	/* Clamp the user index against speculative out-of-bounds access. */
	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}

	/* Bonds may only be attached to physical engines, not virtual ones. */
	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
		drm_dbg(&i915->drm,
			"Bonding with virtual engines not allowed\n");
		return -EINVAL;
	}

	/* Reserved fields must be zero. */
	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(i915,
					  ci.engine_class,
					  ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (intel_engine_uses_guc(master)) {
		DRM_DEBUG("bonding extension not supported with GuC submission");
		return -ENODEV;
	}

	if (get_user(num_bonds, &ext->num_bonds))
		return -EFAULT;

	/* Each listed bond engine must resolve to a real engine. */
	for (n = 0; n < num_bonds; n++) {
		struct intel_engine_cs *bond;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			drm_dbg(&i915->drm,
				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
				n, ci.engine_class, ci.engine_instance);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT handler: configure one engine
 * slot as a width x num_siblings parallel-submission engine from a
 * user-supplied 2D array of (class, instance) pairs.
 */
static int
set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
				      void *data)
{
	struct i915_context_engines_parallel_submit __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct i915_engine_class_instance prev_engine;
	u64 flags;
	int err = 0, n, i, j;
	u16 slot, width, num_siblings;
	struct intel_engine_cs **siblings = NULL;
	intel_engine_mask_t prev_mask;

	if (get_user(slot, &ext->engine_index))
		return -EFAULT;

	if (get_user(width, &ext->width))
		return -EFAULT;

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	/* Without GuC submission, only a single sibling per slot is possible. */
	if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
	    num_siblings != 1) {
		drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
			num_siblings);
		return -EINVAL;
	}

	if (slot >= set->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			slot, set->num_engines);
		return -EINVAL;
	}

	if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", slot);
		return -EINVAL;
	}

	if (get_user(flags, &ext->flags))
		return -EFAULT;

	/* No flags are defined for this extension yet. */
	if (flags) {
		drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
		return -EINVAL;
	}

	/* Reserved fields must be zero. */
	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (width < 2) {
		drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
		return -EINVAL;
	}

	if (num_siblings < 1) {
		drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
			num_siblings);
		return -EINVAL;
	}

	siblings = kmalloc_array(num_siblings * width,
				 sizeof(*siblings),
				 GFP_KERNEL);
	if (!siblings)
		return -ENOMEM;

	/* Create contexts / engines */
	for (i = 0; i < width; ++i) {
		intel_engine_mask_t current_mask = 0;

		for (j = 0; j < num_siblings; ++j) {
			struct i915_engine_class_instance ci;

			n = i * num_siblings + j;
			if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
				err = -EFAULT;
				goto out_err;
			}

			siblings[n] =
				intel_engine_lookup_user(i915, ci.engine_class,
							 ci.engine_instance);
			if (!siblings[n]) {
				drm_dbg(&i915->drm,
					"Invalid sibling[%d]: { class:%d, inst:%d }\n",
					n, ci.engine_class, ci.engine_instance);
				err = -EINVAL;
				goto out_err;
			}

			/*
			 * We don't support breadcrumb handshake on these
			 * classes
			 */
			if (siblings[n]->class == RENDER_CLASS ||
			    siblings[n]->class == COMPUTE_CLASS) {
				err = -EINVAL;
				goto out_err;
			}

			/* All engines across the array must share one class. */
			if (n) {
				if (prev_engine.engine_class !=
				    ci.engine_class) {
					drm_dbg(&i915->drm,
						"Mismatched class %d, %d\n",
						prev_engine.engine_class,
						ci.engine_class);
					err = -EINVAL;
					goto out_err;
				}
			}

			prev_engine = ci;
			current_mask |= siblings[n]->logical_mask;
		}

		/* Each row's logical mask must be the previous row's, shifted by one. */
		if (i > 0) {
			if (current_mask != prev_mask << 1) {
				drm_dbg(&i915->drm,
					"Non contiguous logical mask 0x%x, 0x%x\n",
					prev_mask, current_mask);
				err = -EINVAL;
				goto out_err;
			}
		}
		prev_mask = current_mask;
	}

	set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
	set->engines[slot].num_siblings = num_siblings;
	set->engines[slot].width = width;
	set->engines[slot].siblings = siblings;

	return 0;

out_err:
	kfree(siblings);

	return err;
}

static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] =
set_proto_ctx_engines_bond, 728e5e32171SMatthew Brost [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] = 729e5e32171SMatthew Brost set_proto_ctx_engines_parallel_submit, 730d4433c76SJason Ekstrand }; 731d4433c76SJason Ekstrand 732d4433c76SJason Ekstrand static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv, 733d4433c76SJason Ekstrand struct i915_gem_proto_context *pc, 734d4433c76SJason Ekstrand const struct drm_i915_gem_context_param *args) 735d4433c76SJason Ekstrand { 736d4433c76SJason Ekstrand struct drm_i915_private *i915 = fpriv->dev_priv; 737d4433c76SJason Ekstrand struct set_proto_ctx_engines set = { .i915 = i915 }; 738d4433c76SJason Ekstrand struct i915_context_param_engines __user *user = 739d4433c76SJason Ekstrand u64_to_user_ptr(args->value); 740d4433c76SJason Ekstrand unsigned int n; 741d4433c76SJason Ekstrand u64 extensions; 742d4433c76SJason Ekstrand int err; 743d4433c76SJason Ekstrand 744d4433c76SJason Ekstrand if (pc->num_user_engines >= 0) { 745d4433c76SJason Ekstrand drm_dbg(&i915->drm, "Cannot set engines twice"); 746d4433c76SJason Ekstrand return -EINVAL; 747d4433c76SJason Ekstrand } 748d4433c76SJason Ekstrand 749d4433c76SJason Ekstrand if (args->size < sizeof(*user) || 750d4433c76SJason Ekstrand !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) { 751d4433c76SJason Ekstrand drm_dbg(&i915->drm, "Invalid size for engine array: %d\n", 752d4433c76SJason Ekstrand args->size); 753d4433c76SJason Ekstrand return -EINVAL; 754d4433c76SJason Ekstrand } 755d4433c76SJason Ekstrand 756d4433c76SJason Ekstrand set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); 757d4433c76SJason Ekstrand /* RING_MASK has no shift so we can use it directly here */ 758d4433c76SJason Ekstrand if (set.num_engines > I915_EXEC_RING_MASK + 1) 759d4433c76SJason Ekstrand return -EINVAL; 760d4433c76SJason Ekstrand 761d4433c76SJason Ekstrand set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL); 762d4433c76SJason 
Ekstrand if (!set.engines) 763d4433c76SJason Ekstrand return -ENOMEM; 764d4433c76SJason Ekstrand 765d4433c76SJason Ekstrand for (n = 0; n < set.num_engines; n++) { 766d4433c76SJason Ekstrand struct i915_engine_class_instance ci; 767d4433c76SJason Ekstrand struct intel_engine_cs *engine; 768d4433c76SJason Ekstrand 769d4433c76SJason Ekstrand if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { 770d4433c76SJason Ekstrand kfree(set.engines); 771d4433c76SJason Ekstrand return -EFAULT; 772d4433c76SJason Ekstrand } 773d4433c76SJason Ekstrand 774d4433c76SJason Ekstrand memset(&set.engines[n], 0, sizeof(set.engines[n])); 775d4433c76SJason Ekstrand 776d4433c76SJason Ekstrand if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && 777d4433c76SJason Ekstrand ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) 778d4433c76SJason Ekstrand continue; 779d4433c76SJason Ekstrand 780d4433c76SJason Ekstrand engine = intel_engine_lookup_user(i915, 781d4433c76SJason Ekstrand ci.engine_class, 782d4433c76SJason Ekstrand ci.engine_instance); 783d4433c76SJason Ekstrand if (!engine) { 784d4433c76SJason Ekstrand drm_dbg(&i915->drm, 785d4433c76SJason Ekstrand "Invalid engine[%d]: { class:%d, instance:%d }\n", 786d4433c76SJason Ekstrand n, ci.engine_class, ci.engine_instance); 787d4433c76SJason Ekstrand kfree(set.engines); 788d4433c76SJason Ekstrand return -ENOENT; 789d4433c76SJason Ekstrand } 790d4433c76SJason Ekstrand 791d4433c76SJason Ekstrand set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL; 792d4433c76SJason Ekstrand set.engines[n].engine = engine; 793d4433c76SJason Ekstrand } 794d4433c76SJason Ekstrand 795d4433c76SJason Ekstrand err = -EFAULT; 796d4433c76SJason Ekstrand if (!get_user(extensions, &user->extensions)) 797d4433c76SJason Ekstrand err = i915_user_extensions(u64_to_user_ptr(extensions), 798d4433c76SJason Ekstrand set_proto_ctx_engines_extensions, 799d4433c76SJason Ekstrand ARRAY_SIZE(set_proto_ctx_engines_extensions), 800d4433c76SJason Ekstrand &set); 
801d4433c76SJason Ekstrand if (err) { 802d4433c76SJason Ekstrand kfree(set.engines); 803d4433c76SJason Ekstrand return err; 804d4433c76SJason Ekstrand } 805d4433c76SJason Ekstrand 806d4433c76SJason Ekstrand pc->num_user_engines = set.num_engines; 807d4433c76SJason Ekstrand pc->user_engines = set.engines; 808d4433c76SJason Ekstrand 809d4433c76SJason Ekstrand return 0; 810d4433c76SJason Ekstrand } 811d4433c76SJason Ekstrand 812d4433c76SJason Ekstrand static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv, 813d4433c76SJason Ekstrand struct i915_gem_proto_context *pc, 814d4433c76SJason Ekstrand struct drm_i915_gem_context_param *args) 815d4433c76SJason Ekstrand { 816d4433c76SJason Ekstrand struct drm_i915_private *i915 = fpriv->dev_priv; 817d4433c76SJason Ekstrand struct drm_i915_gem_context_param_sseu user_sseu; 818d4433c76SJason Ekstrand struct intel_sseu *sseu; 819d4433c76SJason Ekstrand int ret; 820d4433c76SJason Ekstrand 821d4433c76SJason Ekstrand if (args->size < sizeof(user_sseu)) 822d4433c76SJason Ekstrand return -EINVAL; 823d4433c76SJason Ekstrand 824d4433c76SJason Ekstrand if (GRAPHICS_VER(i915) != 11) 825d4433c76SJason Ekstrand return -ENODEV; 826d4433c76SJason Ekstrand 827d4433c76SJason Ekstrand if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 828d4433c76SJason Ekstrand sizeof(user_sseu))) 829d4433c76SJason Ekstrand return -EFAULT; 830d4433c76SJason Ekstrand 831d4433c76SJason Ekstrand if (user_sseu.rsvd) 832d4433c76SJason Ekstrand return -EINVAL; 833d4433c76SJason Ekstrand 834d4433c76SJason Ekstrand if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 835d4433c76SJason Ekstrand return -EINVAL; 836d4433c76SJason Ekstrand 837d4433c76SJason Ekstrand if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0)) 838d4433c76SJason Ekstrand return -EINVAL; 839d4433c76SJason Ekstrand 840d4433c76SJason Ekstrand if (pc->num_user_engines >= 0) { 841d4433c76SJason Ekstrand int idx = 
			  user_sseu.engine.engine_instance;
		struct i915_gem_proto_engine *pe;

		if (idx >= pc->num_user_engines)
			return -EINVAL;

		pe = &pc->user_engines[idx];

		/* Only render engine supports RPCS configuration. */
		if (pe->engine->class != RENDER_CLASS)
			return -EINVAL;

		sseu = &pe->sseu;
	} else {
		/* Only render engine supports RPCS configuration. */
		if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
			return -EINVAL;

		/* There is only one render engine */
		if (user_sseu.engine.engine_instance != 0)
			return -EINVAL;

		sseu = &pc->legacy_rcs_sseu;
	}

	/* Translate the uAPI masks into the internal intel_sseu layout. */
	ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
	if (ret)
		return ret;

	/* Report back how much of the argument we consumed. */
	args->size = sizeof(user_sseu);

	return 0;
}

/*
 * Dispatch a single SET_CONTEXT_PARAM request against a proto-context
 * (i.e. before the real context has been created). Each case validates
 * args->size/value itself; unknown or retired params return -EINVAL.
 */
static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
			       struct i915_gem_proto_context *pc,
			       struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
		else
			pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		/* Only an admin may opt out of being banned. */
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			pc->user_flags |= BIT(UCONTEXT_BANNABLE);
		/* Protected-content contexts must remain bannable. */
		else if (pc->uses_protected_content)
			ret = -EPERM;
		else
			pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!args->value)
			pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
		/* Protected-content contexts may not be recoverable. */
		else if (pc->uses_protected_content)
			ret = -EPERM;
		else
			pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = validate_priority(fpriv->dev_priv, args);
		if (!ret)
			pc->sched.priority = args->value;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_proto_ctx_sseu(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_proto_ctx_vm(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_proto_ctx_engines(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		if (args->size)
			ret = -EINVAL;
		else
			ret = proto_context_set_persistence(fpriv->dev_priv, pc,
							    args->value);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = proto_context_set_protected(fpriv->dev_priv, pc,
						  args->value);
		break;

	/* Retired uAPI params: rejected, kept here for documentation. */
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * Bind a freshly-created intel_context to its owning GEM context:
 * publish the back-pointer, inherit the VM, and apply scheduling,
 * watchdog and SSEU configuration derived from the GEM context.
 * Must be called before the context is first pinned.
 */
static int intel_context_set_gem(struct intel_context *ce,
				 struct i915_gem_context *ctx,
				 struct intel_sseu sseu)
{
	int ret = 0;

	/* The back-pointer may be set exactly once, before first pin. */
	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);

	GEM_BUG_ON(intel_context_is_pinned(ce));
	ce->ring_size = SZ_16K;

	/* Swap the engine's default VM for the GEM context's VM. */
	i915_vm_put(ce->vm);
	ce->vm = i915_gem_context_get_eb_vm(ctx);

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_timeslices(ce->engine) &&
	    intel_engine_has_semaphores(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);

	if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
	    ctx->i915->params.request_timeout_ms) {
		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;

		/* Module parameter is in ms; the watchdog wants us. */
		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
	}

	/* A valid SSEU has no zero fields */
	if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
		ret = intel_context_reconfigure_sseu(ce, sseu);

	return ret;
}

/* Drop the perma-pin taken by perma_pin_contexts() on the first @count slots. */
static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		struct intel_context *ce = e->engines[count], *child;

		if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
			continue;

		for_each_child(ce, child)
			intel_context_unpin(child);
		intel_context_unpin(ce);
	}
}

static void unpin_engines(struct i915_gem_engines *e)
{
	__unpin_engines(e, e->num_engines);
}

/* Put the first @count contexts of @e, then free the array itself. */
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		if (!e->engines[count])
			continue;

		intel_context_put(e->engines[count]);
	}
	kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}

/* RCU callback: final teardown of an engines array after a grace period. */
static void free_engines_rcu(struct rcu_head *rcu)
{
	struct i915_gem_engines *engines =
		container_of(rcu, struct i915_gem_engines, rcu);

	i915_sw_fence_fini(&engines->fence);
	free_engines(engines);
}

/*
 * Fold the busy-time accumulated by each context in @engines into the
 * per-class counters of the owning DRM client (for fdinfo accounting).
 */
static void accumulate_runtime(struct i915_drm_client *client,
			       struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	if (!client)
		return;

	/* Transfer accumulated runtime to the parent GEM context. */
	for_each_gem_engine(ce, engines, it) {
		unsigned int class = ce->engine->uabi_class;

		GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
		atomic64_add(intel_context_get_total_runtime_ns(ce),
			     &client->past_runtime[class]);
	}
}

/*
 * sw-fence notifier for a stale engines array:
 * FENCE_COMPLETE - all requests drained; unlink from ctx->stale, account
 *                  runtime, and drop the context reference held by the array.
 * FENCE_FREE     - defer the actual free to RCU (readers may still iterate).
 */
static int
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_gem_engines *engines =
		container_of(fence, typeof(*engines), fence);
	struct i915_gem_context *ctx = engines->ctx;

	switch (state) {
	case FENCE_COMPLETE:
		if (!list_empty(&engines->link)) {
			unsigned long flags;

			spin_lock_irqsave(&ctx->stale.lock, flags);
			list_del(&engines->link);
			spin_unlock_irqrestore(&ctx->stale.lock, flags);
		}
		accumulate_runtime(ctx->client, engines);
		i915_gem_context_put(ctx);

		break;

	case FENCE_FREE:
		init_rcu_head(&engines->rcu);
		call_rcu(&engines->rcu, free_engines_rcu);
		break;
	}

	return NOTIFY_DONE;
}

static struct i915_gem_engines
/* Allocate a zeroed engines array for @count slots with its fence armed. */
*alloc_engines(unsigned int count)
{
	struct i915_gem_engines *e;

	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
	if (!e)
		return NULL;

	i915_sw_fence_init(&e->fence, engines_notify);
	return e;
}

/*
 * Build the legacy (default) engine map for a context: one context per
 * physical engine, indexed by the engine's legacy uABI slot. @rcs_sseu is
 * applied to the render engine only.
 *
 * Returns the populated array or an ERR_PTR; on failure every context
 * created so far is released.
 */
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
						struct intel_sseu rcs_sseu)
{
	const struct intel_gt *gt = to_gt(ctx->i915);
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e, *err;
	enum intel_engine_id id;

	e = alloc_engines(I915_NUM_ENGINES);
	if (!e)
		return ERR_PTR(-ENOMEM);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		struct intel_sseu sseu = {};
		int ret;

		/* Engines without a legacy uABI slot are not exposed here. */
		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[engine->legacy_idx] = ce;
		/* num_engines tracks the highest occupied slot + 1. */
		e->num_engines = max(e->num_engines, engine->legacy_idx + 1);

		if (engine->class == RENDER_CLASS)
			sseu = rcs_sseu;

		ret = intel_context_set_gem(ce, ctx, sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}

	}

	return e;

free_engines:
	free_engines(e);
	return err;
}

/*
 * Permanently pin a parallel parent context and all of its children so
 * their rings are allocated up front (required for parallel submission).
 * On failure, unwind exactly the pins taken so far: the parent plus the
 * first @i children.
 */
static int perma_pin_contexts(struct intel_context *ce)
{
	struct intel_context *child;
	int i = 0, j = 0, ret;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	ret = intel_context_pin(ce);
	if (unlikely(ret))
		return ret;

	for_each_child(ce, child) {
		ret = intel_context_pin(child);
		if (unlikely(ret))
			goto unwind;
		++i;
	}

	set_bit(CONTEXT_PERMA_PIN, &ce->flags);

	return 0;

unwind:
	intel_context_unpin(ce);
	for_each_child(ce, child) {
		/* Only the first i children were successfully pinned. */
		if (j++ < i)
			intel_context_unpin(child);
		else
			break;
	}

	return ret;
}

/*
 * Materialise the user-defined proto-engine array @pe into real
 * intel_contexts: physical, load-balanced (virtual) or parallel, per slot.
 * Invalid slots are deliberately left NULL (holes in the map).
 *
 * Returns the populated engines array or an ERR_PTR; on any failure all
 * contexts created so far are released via free_engines().
 */
static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
					     unsigned int num_engines,
					     struct i915_gem_proto_engine *pe)
{
	struct i915_gem_engines *e, *err;
	unsigned int n;

	e = alloc_engines(num_engines);
	if (!e)
		return ERR_PTR(-ENOMEM);
	e->num_engines = num_engines;

	for (n = 0; n < num_engines; n++) {
		struct intel_context *ce, *child;
		int ret;

		switch (pe[n].type) {
		case I915_GEM_ENGINE_TYPE_PHYSICAL:
			ce = intel_context_create(pe[n].engine);
			break;

		case I915_GEM_ENGINE_TYPE_BALANCED:
			ce = intel_engine_create_virtual(pe[n].siblings,
							 pe[n].num_siblings, 0);
			break;

		case I915_GEM_ENGINE_TYPE_PARALLEL:
			ce = intel_engine_create_parallel(pe[n].siblings,
							  pe[n].num_siblings,
							  pe[n].width);
			break;

		case I915_GEM_ENGINE_TYPE_INVALID:
		default:
			GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
			continue;
		}

		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[n] = ce;

		/*
		 * NOTE(review): this uses pe->sseu (slot 0) rather than
		 * pe[n].sseu for every engine — confirm this is intended.
		 */
		ret = intel_context_set_gem(ce, ctx, pe->sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}
		for_each_child(ce, child) {
			ret = intel_context_set_gem(child, ctx, pe->sseu);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}

		/*
		 * XXX: Must be done after calling intel_context_set_gem as that
		 * function changes the ring size. The ring is allocated when
		 * the context is pinned. If the ring size is changed after
		 * allocation we have a mismatch of the ring size and will cause
		 * the context to hang. Presumably with a bit of reordering we
		 * could move the perma-pin step to the backend function
		 * intel_engine_create_parallel.
		 */
		if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
			ret = perma_pin_contexts(ce);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}
	}

	return e;

free_engines:
	free_engines(e);
	return err;
}

/*
 * Deferred (workqueue) half of context release: runs once the last
 * reference is gone, so it may take mutexes and sleep while tearing down
 * the context's VM, syncobj, PXP wakeref and client link.
 */
static void i915_gem_context_release_work(struct work_struct *work)
{
	struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
						    release_work);
	struct i915_address_space *vm;

	trace_i915_context_free(ctx);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	if (ctx->syncobj)
		drm_syncobj_put(ctx->syncobj);

	vm = ctx->vm;
	if (vm)
		i915_vm_put(vm);

	/* Release the PXP runtime-PM reference taken at creation, if any. */
	if (ctx->pxp_wakeref)
		intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);

	if (ctx->client)
		i915_drm_client_put(ctx->client);

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);

	/* RCU readers may still hold the context; defer the final free. */
	kfree_rcu(ctx, rcu);
}

/* kref release: punt the heavyweight teardown to the driver workqueue. */
void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);

	queue_work(ctx->i915->wq, &ctx->release_work);
}

/* Dereference ctx->engines when the caller guarantees exclusive access. */
static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines, true);
}

/* Force a GPU reset on @engine to evict a closed context's work. */
static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}

static bool __cancel_engine(struct intel_engine_cs *engine)
{
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset). As we have marked our context
	 * as banned, any incomplete request, including any running, will
	 * be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fallback to doing a local reset
	 * instead.
	 */
	return intel_engine_pulse(engine) == 0;
}

/*
 * Find the physical engine (if any) on which @ce currently has work in
 * flight, by asking the backend first and then walking the timeline's
 * incomplete requests from newest to oldest. Returns NULL when idle.
 */
static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	if (intel_context_has_inflight(ce))
		return intel_context_inflight(ce);

	if (!ce->timeline)
		return NULL;

	/*
	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
	 * to the request to prevent it being transferred to a new timeline
	 * (and onto a new timeline->requests list).
	 */
	rcu_read_lock();
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		bool found;

		/* timeline is already completed upto this point? */
		if (!i915_request_get_rcu(rq))
			break;

		/* Check with the backend if the request is inflight */
		found = true;
		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
			found = i915_request_active_engine(rq, &engine);

		i915_request_put(rq);
		if (found)
			break;
	}
	rcu_read_unlock();

	return engine;
}

static void
kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may be have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
	for_each_gem_engine(ce, engines, it) {
		struct intel_engine_cs *engine;

		if ((exit || !persistent) && intel_context_revoke(ce))
			continue; /* Already marked.
*/ 13939f3ccd40SChris Wilson 13944a317415SChris Wilson /* 13954a317415SChris Wilson * Check the current active state of this context; if we 13964a317415SChris Wilson * are currently executing on the GPU we need to evict 13974a317415SChris Wilson * ourselves. On the other hand, if we haven't yet been 13984a317415SChris Wilson * submitted to the GPU or if everything is complete, 13994a317415SChris Wilson * we have nothing to do. 14004a317415SChris Wilson */ 14014a317415SChris Wilson engine = active_engine(ce); 14022e0986a5SChris Wilson 14032e0986a5SChris Wilson /* First attempt to gracefully cancel the context */ 140445c64ecfSTvrtko Ursulin if (engine && !__cancel_engine(engine) && (exit || !persistent)) 14052e0986a5SChris Wilson /* 14062e0986a5SChris Wilson * If we are unable to send a preemptive pulse to bump 14072e0986a5SChris Wilson * the context from the GPU, we have to resort to a full 14082e0986a5SChris Wilson * reset. We hope the collateral damage is worth it. 14092e0986a5SChris Wilson */ 141042fb60deSChris Wilson __reset_context(engines->ctx, engine); 14112e0986a5SChris Wilson } 14122e0986a5SChris Wilson } 14132e0986a5SChris Wilson 1414651dabe2SChris Wilson static void kill_context(struct i915_gem_context *ctx) 141542fb60deSChris Wilson { 141642fb60deSChris Wilson struct i915_gem_engines *pos, *next; 141742fb60deSChris Wilson 1418130a95e9SChris Wilson spin_lock_irq(&ctx->stale.lock); 1419130a95e9SChris Wilson GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 142042fb60deSChris Wilson list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) { 1421130a95e9SChris Wilson if (!i915_sw_fence_await(&pos->fence)) { 1422130a95e9SChris Wilson list_del_init(&pos->link); 142342fb60deSChris Wilson continue; 1424130a95e9SChris Wilson } 142542fb60deSChris Wilson 1426130a95e9SChris Wilson spin_unlock_irq(&ctx->stale.lock); 142742fb60deSChris Wilson 142845c64ecfSTvrtko Ursulin kill_engines(pos, !ctx->i915->params.enable_hangcheck, 142945c64ecfSTvrtko Ursulin 
i915_gem_context_is_persistent(ctx)); 143042fb60deSChris Wilson 1431130a95e9SChris Wilson spin_lock_irq(&ctx->stale.lock); 1432130a95e9SChris Wilson GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); 143342fb60deSChris Wilson list_safe_reset_next(pos, next, link); 143442fb60deSChris Wilson list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */ 143542fb60deSChris Wilson 143642fb60deSChris Wilson i915_sw_fence_complete(&pos->fence); 143742fb60deSChris Wilson } 1438130a95e9SChris Wilson spin_unlock_irq(&ctx->stale.lock); 143942fb60deSChris Wilson } 144042fb60deSChris Wilson 1441130a95e9SChris Wilson static void engines_idle_release(struct i915_gem_context *ctx, 1442130a95e9SChris Wilson struct i915_gem_engines *engines) 1443130a95e9SChris Wilson { 1444130a95e9SChris Wilson struct i915_gem_engines_iter it; 1445130a95e9SChris Wilson struct intel_context *ce; 1446130a95e9SChris Wilson 1447130a95e9SChris Wilson INIT_LIST_HEAD(&engines->link); 1448130a95e9SChris Wilson 1449130a95e9SChris Wilson engines->ctx = i915_gem_context_get(ctx); 1450130a95e9SChris Wilson 1451130a95e9SChris Wilson for_each_gem_engine(ce, engines, it) { 1452e6829625SChris Wilson int err; 1453130a95e9SChris Wilson 1454130a95e9SChris Wilson /* serialises with execbuf */ 1455*83321094SMatthew Brost intel_context_close(ce); 1456130a95e9SChris Wilson if (!intel_context_pin_if_active(ce)) 1457130a95e9SChris Wilson continue; 1458130a95e9SChris Wilson 1459e6829625SChris Wilson /* Wait until context is finally scheduled out and retired */ 1460e6829625SChris Wilson err = i915_sw_fence_await_active(&engines->fence, 1461e6829625SChris Wilson &ce->active, 1462e6829625SChris Wilson I915_ACTIVE_AWAIT_BARRIER); 1463130a95e9SChris Wilson intel_context_unpin(ce); 1464e6829625SChris Wilson if (err) 1465130a95e9SChris Wilson goto kill; 1466130a95e9SChris Wilson } 1467130a95e9SChris Wilson 1468130a95e9SChris Wilson spin_lock_irq(&ctx->stale.lock); 1469130a95e9SChris Wilson if (!i915_gem_context_is_closed(ctx)) 
1470130a95e9SChris Wilson list_add_tail(&engines->link, &ctx->stale.engines); 1471130a95e9SChris Wilson spin_unlock_irq(&ctx->stale.lock); 1472130a95e9SChris Wilson 1473130a95e9SChris Wilson kill: 1474130a95e9SChris Wilson if (list_empty(&engines->link)) /* raced, already closed */ 147545c64ecfSTvrtko Ursulin kill_engines(engines, true, 147645c64ecfSTvrtko Ursulin i915_gem_context_is_persistent(ctx)); 1477130a95e9SChris Wilson 1478130a95e9SChris Wilson i915_sw_fence_commit(&engines->fence); 147942fb60deSChris Wilson } 148042fb60deSChris Wilson 1481267c0126SChris Wilson static void set_closed_name(struct i915_gem_context *ctx) 1482267c0126SChris Wilson { 1483267c0126SChris Wilson char *s; 1484267c0126SChris Wilson 1485267c0126SChris Wilson /* Replace '[]' with '<>' to indicate closed in debug prints */ 1486267c0126SChris Wilson 1487267c0126SChris Wilson s = strrchr(ctx->name, '['); 1488267c0126SChris Wilson if (!s) 1489267c0126SChris Wilson return; 1490267c0126SChris Wilson 1491267c0126SChris Wilson *s = '<'; 1492267c0126SChris Wilson 1493267c0126SChris Wilson s = strchr(s + 1, ']'); 1494267c0126SChris Wilson if (s) 1495267c0126SChris Wilson *s = '>'; 1496267c0126SChris Wilson } 1497267c0126SChris Wilson 149810be98a7SChris Wilson static void context_close(struct i915_gem_context *ctx) 149910be98a7SChris Wilson { 150049bd54b3STvrtko Ursulin struct i915_drm_client *client; 150149bd54b3STvrtko Ursulin 1502130a95e9SChris Wilson /* Flush any concurrent set_engines() */ 1503130a95e9SChris Wilson mutex_lock(&ctx->engines_mutex); 1504e5e32171SMatthew Brost unpin_engines(__context_engines_static(ctx)); 1505130a95e9SChris Wilson engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); 15062850748eSChris Wilson i915_gem_context_set_closed(ctx); 1507130a95e9SChris Wilson mutex_unlock(&ctx->engines_mutex); 15082850748eSChris Wilson 1509155ab883SChris Wilson mutex_lock(&ctx->mutex); 1510155ab883SChris Wilson 1511130a95e9SChris Wilson set_closed_name(ctx); 
1512130a95e9SChris Wilson 151310be98a7SChris Wilson /* 151410be98a7SChris Wilson * The LUT uses the VMA as a backpointer to unref the object, 151510be98a7SChris Wilson * so we need to clear the LUT before we close all the VMA (inside 151610be98a7SChris Wilson * the ppgtt). 151710be98a7SChris Wilson */ 151810be98a7SChris Wilson lut_close(ctx); 151910be98a7SChris Wilson 1520e1a7ab4fSThomas Hellström ctx->file_priv = ERR_PTR(-EBADF); 1521e1a7ab4fSThomas Hellström 152249bd54b3STvrtko Ursulin client = ctx->client; 152349bd54b3STvrtko Ursulin if (client) { 152449bd54b3STvrtko Ursulin spin_lock(&client->ctx_lock); 152549bd54b3STvrtko Ursulin list_del_rcu(&ctx->client_link); 152649bd54b3STvrtko Ursulin spin_unlock(&client->ctx_lock); 152749bd54b3STvrtko Ursulin } 152849bd54b3STvrtko Ursulin 1529155ab883SChris Wilson mutex_unlock(&ctx->mutex); 15302e0986a5SChris Wilson 15312e0986a5SChris Wilson /* 15322e0986a5SChris Wilson * If the user has disabled hangchecking, we can not be sure that 15332e0986a5SChris Wilson * the batches will ever complete after the context is closed, 15342e0986a5SChris Wilson * keeping the context and all resources pinned forever. So in this 15352e0986a5SChris Wilson * case we opt to forcibly kill off all remaining requests on 15362e0986a5SChris Wilson * context close. 
15372e0986a5SChris Wilson */ 15382e0986a5SChris Wilson kill_context(ctx); 15392e0986a5SChris Wilson 154010be98a7SChris Wilson i915_gem_context_put(ctx); 154110be98a7SChris Wilson } 154210be98a7SChris Wilson 1543a0e04715SChris Wilson static int __context_set_persistence(struct i915_gem_context *ctx, bool state) 1544a0e04715SChris Wilson { 1545a0e04715SChris Wilson if (i915_gem_context_is_persistent(ctx) == state) 1546a0e04715SChris Wilson return 0; 1547a0e04715SChris Wilson 1548a0e04715SChris Wilson if (state) { 1549a0e04715SChris Wilson /* 1550a0e04715SChris Wilson * Only contexts that are short-lived [that will expire or be 1551a0e04715SChris Wilson * reset] are allowed to survive past termination. We require 1552a0e04715SChris Wilson * hangcheck to ensure that the persistent requests are healthy. 1553a0e04715SChris Wilson */ 15548a25c4beSJani Nikula if (!ctx->i915->params.enable_hangcheck) 1555a0e04715SChris Wilson return -EINVAL; 1556a0e04715SChris Wilson 1557a0e04715SChris Wilson i915_gem_context_set_persistence(ctx); 1558a0e04715SChris Wilson } else { 1559a0e04715SChris Wilson /* To cancel a context we use "preempt-to-idle" */ 1560a0e04715SChris Wilson if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) 1561a0e04715SChris Wilson return -ENODEV; 1562a0e04715SChris Wilson 1563d1b9b5f1SChris Wilson /* 1564d1b9b5f1SChris Wilson * If the cancel fails, we then need to reset, cleanly! 1565d1b9b5f1SChris Wilson * 1566d1b9b5f1SChris Wilson * If the per-engine reset fails, all hope is lost! We resort 1567d1b9b5f1SChris Wilson * to a full GPU reset in that unlikely case, but realistically 1568d1b9b5f1SChris Wilson * if the engine could not reset, the full reset does not fare 1569d1b9b5f1SChris Wilson * much better. The damage has been done. 
1570d1b9b5f1SChris Wilson * 1571d1b9b5f1SChris Wilson * However, if we cannot reset an engine by itself, we cannot 1572d1b9b5f1SChris Wilson * cleanup a hanging persistent context without causing 1573d1b9b5f1SChris Wilson * colateral damage, and we should not pretend we can by 1574d1b9b5f1SChris Wilson * exposing the interface. 1575d1b9b5f1SChris Wilson */ 15761a9c4db4SMichał Winiarski if (!intel_has_reset_engine(to_gt(ctx->i915))) 1577d1b9b5f1SChris Wilson return -ENODEV; 1578d1b9b5f1SChris Wilson 1579a0e04715SChris Wilson i915_gem_context_clear_persistence(ctx); 1580a0e04715SChris Wilson } 1581a0e04715SChris Wilson 1582a0e04715SChris Wilson return 0; 1583a0e04715SChris Wilson } 1584a0e04715SChris Wilson 158510be98a7SChris Wilson static struct i915_gem_context * 1586a34857dcSJason Ekstrand i915_gem_create_context(struct drm_i915_private *i915, 1587a34857dcSJason Ekstrand const struct i915_gem_proto_context *pc) 158810be98a7SChris Wilson { 158910be98a7SChris Wilson struct i915_gem_context *ctx; 15900eee9977SJason Ekstrand struct i915_address_space *vm = NULL; 15910eee9977SJason Ekstrand struct i915_gem_engines *e; 15920eee9977SJason Ekstrand int err; 15930eee9977SJason Ekstrand int i; 159410be98a7SChris Wilson 15950eee9977SJason Ekstrand ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 15960eee9977SJason Ekstrand if (!ctx) 15970eee9977SJason Ekstrand return ERR_PTR(-ENOMEM); 15980eee9977SJason Ekstrand 15990eee9977SJason Ekstrand kref_init(&ctx->ref); 16000eee9977SJason Ekstrand ctx->i915 = i915; 16010eee9977SJason Ekstrand ctx->sched = pc->sched; 16020eee9977SJason Ekstrand mutex_init(&ctx->mutex); 16030eee9977SJason Ekstrand INIT_LIST_HEAD(&ctx->link); 160475eefd82SDaniel Vetter INIT_WORK(&ctx->release_work, i915_gem_context_release_work); 16050eee9977SJason Ekstrand 16060eee9977SJason Ekstrand spin_lock_init(&ctx->stale.lock); 16070eee9977SJason Ekstrand INIT_LIST_HEAD(&ctx->stale.engines); 160810be98a7SChris Wilson 1609a34857dcSJason Ekstrand if (pc->vm) { 
16100eee9977SJason Ekstrand vm = i915_vm_get(pc->vm); 1611a34857dcSJason Ekstrand } else if (HAS_FULL_PPGTT(i915)) { 1612ab53497bSChris Wilson struct i915_ppgtt *ppgtt; 161310be98a7SChris Wilson 16141a9c4db4SMichał Winiarski ppgtt = i915_ppgtt_create(to_gt(i915), 0); 161510be98a7SChris Wilson if (IS_ERR(ppgtt)) { 1616baa89ba3SWambui Karuga drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", 161710be98a7SChris Wilson PTR_ERR(ppgtt)); 16180eee9977SJason Ekstrand err = PTR_ERR(ppgtt); 16190eee9977SJason Ekstrand goto err_ctx; 16200eee9977SJason Ekstrand } 16210eee9977SJason Ekstrand vm = &ppgtt->vm; 16220eee9977SJason Ekstrand } 1623e1a7ab4fSThomas Hellström if (vm) 1624e1a7ab4fSThomas Hellström ctx->vm = vm; 162510be98a7SChris Wilson 16260eee9977SJason Ekstrand mutex_init(&ctx->engines_mutex); 1627d4433c76SJason Ekstrand if (pc->num_user_engines >= 0) { 1628d4433c76SJason Ekstrand i915_gem_context_set_user_engines(ctx); 16290eee9977SJason Ekstrand e = user_engines(ctx, pc->num_user_engines, pc->user_engines); 16300eee9977SJason Ekstrand } else { 16310eee9977SJason Ekstrand i915_gem_context_clear_user_engines(ctx); 16320eee9977SJason Ekstrand e = default_engines(ctx, pc->legacy_rcs_sseu); 1633d4433c76SJason Ekstrand } 16340eee9977SJason Ekstrand if (IS_ERR(e)) { 16350eee9977SJason Ekstrand err = PTR_ERR(e); 16360eee9977SJason Ekstrand goto err_vm; 16370eee9977SJason Ekstrand } 16380eee9977SJason Ekstrand RCU_INIT_POINTER(ctx->engines, e); 16390eee9977SJason Ekstrand 16400eee9977SJason Ekstrand INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); 16410eee9977SJason Ekstrand mutex_init(&ctx->lut_mutex); 16420eee9977SJason Ekstrand 16430eee9977SJason Ekstrand /* NB: Mark all slices as needing a remap so that when the context first 16440eee9977SJason Ekstrand * loads it will restore whatever remap state already exists. If there 16450eee9977SJason Ekstrand * is no remap info, it will be a NOP. 
*/ 16460eee9977SJason Ekstrand ctx->remap_slice = ALL_L3_SLICES(i915); 16470eee9977SJason Ekstrand 16480eee9977SJason Ekstrand ctx->user_flags = pc->user_flags; 16490eee9977SJason Ekstrand 16500eee9977SJason Ekstrand for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) 16510eee9977SJason Ekstrand ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; 1652d4433c76SJason Ekstrand 1653a34857dcSJason Ekstrand if (pc->single_timeline) { 16540eee9977SJason Ekstrand err = drm_syncobj_create(&ctx->syncobj, 165500dae4d3SJason Ekstrand DRM_SYNCOBJ_CREATE_SIGNALED, 165600dae4d3SJason Ekstrand NULL); 16570eee9977SJason Ekstrand if (err) 16580eee9977SJason Ekstrand goto err_engines; 165910be98a7SChris Wilson } 166010be98a7SChris Wilson 1661d3ac8d42SDaniele Ceraolo Spurio if (pc->uses_protected_content) { 1662d3ac8d42SDaniele Ceraolo Spurio ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); 1663d3ac8d42SDaniele Ceraolo Spurio ctx->uses_protected_content = true; 1664d3ac8d42SDaniele Ceraolo Spurio } 1665d3ac8d42SDaniele Ceraolo Spurio 166610be98a7SChris Wilson trace_i915_context_create(ctx); 166710be98a7SChris Wilson 166810be98a7SChris Wilson return ctx; 16690eee9977SJason Ekstrand 16700eee9977SJason Ekstrand err_engines: 16710eee9977SJason Ekstrand free_engines(e); 16720eee9977SJason Ekstrand err_vm: 16730eee9977SJason Ekstrand if (ctx->vm) 1674e1a7ab4fSThomas Hellström i915_vm_put(ctx->vm); 16750eee9977SJason Ekstrand err_ctx: 16760eee9977SJason Ekstrand kfree(ctx); 16770eee9977SJason Ekstrand return ERR_PTR(err); 167810be98a7SChris Wilson } 167910be98a7SChris Wilson 1680a4e7ccdaSChris Wilson static void init_contexts(struct i915_gem_contexts *gc) 168110be98a7SChris Wilson { 1682a4e7ccdaSChris Wilson spin_lock_init(&gc->lock); 1683a4e7ccdaSChris Wilson INIT_LIST_HEAD(&gc->list); 168410be98a7SChris Wilson } 168510be98a7SChris Wilson 1686e6ba7648SChris Wilson void i915_gem_init__contexts(struct drm_i915_private *i915) 168710be98a7SChris Wilson { 
1688a4e7ccdaSChris Wilson init_contexts(&i915->gem.contexts); 168910be98a7SChris Wilson } 169010be98a7SChris Wilson 1691a4c1cdd3SJason Ekstrand static void gem_context_register(struct i915_gem_context *ctx, 1692c100777cSTvrtko Ursulin struct drm_i915_file_private *fpriv, 1693a4c1cdd3SJason Ekstrand u32 id) 169410be98a7SChris Wilson { 1695eb4dedaeSChris Wilson struct drm_i915_private *i915 = ctx->i915; 1696a4c1cdd3SJason Ekstrand void *old; 169710be98a7SChris Wilson 169810be98a7SChris Wilson ctx->file_priv = fpriv; 1699a4e7ccdaSChris Wilson 170010be98a7SChris Wilson ctx->pid = get_task_pid(current, PIDTYPE_PID); 170143c50460STvrtko Ursulin ctx->client = i915_drm_client_get(fpriv->client); 170243c50460STvrtko Ursulin 1703fc4f125dSChris Wilson snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 170410be98a7SChris Wilson current->comm, pid_nr(ctx->pid)); 170510be98a7SChris Wilson 170610be98a7SChris Wilson /* And finally expose ourselves to userspace via the idr */ 1707a4c1cdd3SJason Ekstrand old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL); 1708a4c1cdd3SJason Ekstrand WARN_ON(old); 1709c100777cSTvrtko Ursulin 171049bd54b3STvrtko Ursulin spin_lock(&ctx->client->ctx_lock); 171149bd54b3STvrtko Ursulin list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list); 171249bd54b3STvrtko Ursulin spin_unlock(&ctx->client->ctx_lock); 171349bd54b3STvrtko Ursulin 1714eb4dedaeSChris Wilson spin_lock(&i915->gem.contexts.lock); 1715eb4dedaeSChris Wilson list_add_tail(&ctx->link, &i915->gem.contexts.list); 1716eb4dedaeSChris Wilson spin_unlock(&i915->gem.contexts.lock); 171710be98a7SChris Wilson } 171810be98a7SChris Wilson 171910be98a7SChris Wilson int i915_gem_context_open(struct drm_i915_private *i915, 172010be98a7SChris Wilson struct drm_file *file) 172110be98a7SChris Wilson { 172210be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 1723a34857dcSJason Ekstrand struct i915_gem_proto_context *pc; 172410be98a7SChris Wilson struct i915_gem_context *ctx; 
172510be98a7SChris Wilson int err; 172610be98a7SChris Wilson 1727a4c1cdd3SJason Ekstrand mutex_init(&file_priv->proto_context_lock); 1728a4c1cdd3SJason Ekstrand xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC); 1729a4c1cdd3SJason Ekstrand 1730a4c1cdd3SJason Ekstrand /* 0 reserved for the default context */ 1731a4c1cdd3SJason Ekstrand xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1); 1732c100777cSTvrtko Ursulin 17335dbd2b7bSChris Wilson /* 0 reserved for invalid/unassigned ppgtt */ 17345dbd2b7bSChris Wilson xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); 173510be98a7SChris Wilson 1736a34857dcSJason Ekstrand pc = proto_context_create(i915, 0); 1737a34857dcSJason Ekstrand if (IS_ERR(pc)) { 1738a34857dcSJason Ekstrand err = PTR_ERR(pc); 1739a34857dcSJason Ekstrand goto err; 1740a34857dcSJason Ekstrand } 1741a34857dcSJason Ekstrand 1742a34857dcSJason Ekstrand ctx = i915_gem_create_context(i915, pc); 1743d3ac8d42SDaniele Ceraolo Spurio proto_context_close(i915, pc); 174410be98a7SChris Wilson if (IS_ERR(ctx)) { 174510be98a7SChris Wilson err = PTR_ERR(ctx); 174610be98a7SChris Wilson goto err; 174710be98a7SChris Wilson } 174810be98a7SChris Wilson 1749a4c1cdd3SJason Ekstrand gem_context_register(ctx, file_priv, 0); 175010be98a7SChris Wilson 175110be98a7SChris Wilson return 0; 175210be98a7SChris Wilson 175310be98a7SChris Wilson err: 17545dbd2b7bSChris Wilson xa_destroy(&file_priv->vm_xa); 1755c100777cSTvrtko Ursulin xa_destroy(&file_priv->context_xa); 1756a4c1cdd3SJason Ekstrand xa_destroy(&file_priv->proto_context_xa); 1757a4c1cdd3SJason Ekstrand mutex_destroy(&file_priv->proto_context_lock); 175810be98a7SChris Wilson return err; 175910be98a7SChris Wilson } 176010be98a7SChris Wilson 176110be98a7SChris Wilson void i915_gem_context_close(struct drm_file *file) 176210be98a7SChris Wilson { 176310be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 1764a4c1cdd3SJason Ekstrand struct i915_gem_proto_context *pc; 17655dbd2b7bSChris 
Wilson struct i915_address_space *vm; 1766c100777cSTvrtko Ursulin struct i915_gem_context *ctx; 1767c100777cSTvrtko Ursulin unsigned long idx; 176810be98a7SChris Wilson 1769a4c1cdd3SJason Ekstrand xa_for_each(&file_priv->proto_context_xa, idx, pc) 1770d3ac8d42SDaniele Ceraolo Spurio proto_context_close(file_priv->dev_priv, pc); 1771a4c1cdd3SJason Ekstrand xa_destroy(&file_priv->proto_context_xa); 1772a4c1cdd3SJason Ekstrand mutex_destroy(&file_priv->proto_context_lock); 1773a4c1cdd3SJason Ekstrand 1774c100777cSTvrtko Ursulin xa_for_each(&file_priv->context_xa, idx, ctx) 1775c100777cSTvrtko Ursulin context_close(ctx); 1776c100777cSTvrtko Ursulin xa_destroy(&file_priv->context_xa); 177710be98a7SChris Wilson 17785dbd2b7bSChris Wilson xa_for_each(&file_priv->vm_xa, idx, vm) 17795dbd2b7bSChris Wilson i915_vm_put(vm); 17805dbd2b7bSChris Wilson xa_destroy(&file_priv->vm_xa); 178110be98a7SChris Wilson } 178210be98a7SChris Wilson 178310be98a7SChris Wilson int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, 178410be98a7SChris Wilson struct drm_file *file) 178510be98a7SChris Wilson { 178610be98a7SChris Wilson struct drm_i915_private *i915 = to_i915(dev); 178710be98a7SChris Wilson struct drm_i915_gem_vm_control *args = data; 178810be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 1789ab53497bSChris Wilson struct i915_ppgtt *ppgtt; 17905dbd2b7bSChris Wilson u32 id; 179110be98a7SChris Wilson int err; 179210be98a7SChris Wilson 179310be98a7SChris Wilson if (!HAS_FULL_PPGTT(i915)) 179410be98a7SChris Wilson return -ENODEV; 179510be98a7SChris Wilson 179610be98a7SChris Wilson if (args->flags) 179710be98a7SChris Wilson return -EINVAL; 179810be98a7SChris Wilson 17991a9c4db4SMichał Winiarski ppgtt = i915_ppgtt_create(to_gt(i915), 0); 180010be98a7SChris Wilson if (IS_ERR(ppgtt)) 180110be98a7SChris Wilson return PTR_ERR(ppgtt); 180210be98a7SChris Wilson 180310be98a7SChris Wilson if (args->extensions) { 180410be98a7SChris Wilson err = 
i915_user_extensions(u64_to_user_ptr(args->extensions), 180510be98a7SChris Wilson NULL, 0, 180610be98a7SChris Wilson ppgtt); 180710be98a7SChris Wilson if (err) 180810be98a7SChris Wilson goto err_put; 180910be98a7SChris Wilson } 181010be98a7SChris Wilson 18115dbd2b7bSChris Wilson err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, 18125dbd2b7bSChris Wilson xa_limit_32b, GFP_KERNEL); 181310be98a7SChris Wilson if (err) 181410be98a7SChris Wilson goto err_put; 181510be98a7SChris Wilson 18165dbd2b7bSChris Wilson GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 18175dbd2b7bSChris Wilson args->vm_id = id; 181810be98a7SChris Wilson return 0; 181910be98a7SChris Wilson 182010be98a7SChris Wilson err_put: 1821e568ac38SChris Wilson i915_vm_put(&ppgtt->vm); 182210be98a7SChris Wilson return err; 182310be98a7SChris Wilson } 182410be98a7SChris Wilson 182510be98a7SChris Wilson int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, 182610be98a7SChris Wilson struct drm_file *file) 182710be98a7SChris Wilson { 182810be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 182910be98a7SChris Wilson struct drm_i915_gem_vm_control *args = data; 1830e568ac38SChris Wilson struct i915_address_space *vm; 183110be98a7SChris Wilson 183210be98a7SChris Wilson if (args->flags) 183310be98a7SChris Wilson return -EINVAL; 183410be98a7SChris Wilson 183510be98a7SChris Wilson if (args->extensions) 183610be98a7SChris Wilson return -EINVAL; 183710be98a7SChris Wilson 18385dbd2b7bSChris Wilson vm = xa_erase(&file_priv->vm_xa, args->vm_id); 1839e568ac38SChris Wilson if (!vm) 184010be98a7SChris Wilson return -ENOENT; 184110be98a7SChris Wilson 1842e568ac38SChris Wilson i915_vm_put(vm); 184310be98a7SChris Wilson return 0; 184410be98a7SChris Wilson } 184510be98a7SChris Wilson 184610be98a7SChris Wilson static int get_ppgtt(struct drm_i915_file_private *file_priv, 184710be98a7SChris Wilson struct i915_gem_context *ctx, 184810be98a7SChris Wilson struct 
drm_i915_gem_context_param *args) 184910be98a7SChris Wilson { 1850e568ac38SChris Wilson struct i915_address_space *vm; 18515dbd2b7bSChris Wilson int err; 18525dbd2b7bSChris Wilson u32 id; 185310be98a7SChris Wilson 1854a82a9979SDaniel Vetter if (!i915_gem_context_has_full_ppgtt(ctx)) 185510be98a7SChris Wilson return -ENODEV; 185610be98a7SChris Wilson 18579ec8795eSDaniel Vetter vm = ctx->vm; 18589ec8795eSDaniel Vetter GEM_BUG_ON(!vm); 185990211ea4SChris Wilson 186090211ea4SChris Wilson err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); 18615dbd2b7bSChris Wilson if (err) 18629ec8795eSDaniel Vetter return err; 186310be98a7SChris Wilson 1864e1a7ab4fSThomas Hellström i915_vm_get(vm); 186510be98a7SChris Wilson 18665dbd2b7bSChris Wilson GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 18675dbd2b7bSChris Wilson args->value = id; 186810be98a7SChris Wilson args->size = 0; 186910be98a7SChris Wilson 18705dbd2b7bSChris Wilson return err; 187110be98a7SChris Wilson } 187210be98a7SChris Wilson 187311ecbdddSLionel Landwerlin int 18740b6613c6SVenkata Sandeep Dhanalakota i915_gem_user_to_context_sseu(struct intel_gt *gt, 187510be98a7SChris Wilson const struct drm_i915_gem_context_param_sseu *user, 187610be98a7SChris Wilson struct intel_sseu *context) 187710be98a7SChris Wilson { 18780b6613c6SVenkata Sandeep Dhanalakota const struct sseu_dev_info *device = >->info.sseu; 18790b6613c6SVenkata Sandeep Dhanalakota struct drm_i915_private *i915 = gt->i915; 1880b87d3901SMatt Roper unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0); 188110be98a7SChris Wilson 188210be98a7SChris Wilson /* No zeros in any field. */ 188310be98a7SChris Wilson if (!user->slice_mask || !user->subslice_mask || 188410be98a7SChris Wilson !user->min_eus_per_subslice || !user->max_eus_per_subslice) 188510be98a7SChris Wilson return -EINVAL; 188610be98a7SChris Wilson 188710be98a7SChris Wilson /* Max > min. 
*/ 188810be98a7SChris Wilson if (user->max_eus_per_subslice < user->min_eus_per_subslice) 188910be98a7SChris Wilson return -EINVAL; 189010be98a7SChris Wilson 189110be98a7SChris Wilson /* 189210be98a7SChris Wilson * Some future proofing on the types since the uAPI is wider than the 189310be98a7SChris Wilson * current internal implementation. 189410be98a7SChris Wilson */ 189510be98a7SChris Wilson if (overflows_type(user->slice_mask, context->slice_mask) || 189610be98a7SChris Wilson overflows_type(user->subslice_mask, context->subslice_mask) || 189710be98a7SChris Wilson overflows_type(user->min_eus_per_subslice, 189810be98a7SChris Wilson context->min_eus_per_subslice) || 189910be98a7SChris Wilson overflows_type(user->max_eus_per_subslice, 190010be98a7SChris Wilson context->max_eus_per_subslice)) 190110be98a7SChris Wilson return -EINVAL; 190210be98a7SChris Wilson 190310be98a7SChris Wilson /* Check validity against hardware. */ 190410be98a7SChris Wilson if (user->slice_mask & ~device->slice_mask) 190510be98a7SChris Wilson return -EINVAL; 190610be98a7SChris Wilson 1907b87d3901SMatt Roper if (user->subslice_mask & ~dev_subslice_mask) 190810be98a7SChris Wilson return -EINVAL; 190910be98a7SChris Wilson 191010be98a7SChris Wilson if (user->max_eus_per_subslice > device->max_eus_per_subslice) 191110be98a7SChris Wilson return -EINVAL; 191210be98a7SChris Wilson 191310be98a7SChris Wilson context->slice_mask = user->slice_mask; 191410be98a7SChris Wilson context->subslice_mask = user->subslice_mask; 191510be98a7SChris Wilson context->min_eus_per_subslice = user->min_eus_per_subslice; 191610be98a7SChris Wilson context->max_eus_per_subslice = user->max_eus_per_subslice; 191710be98a7SChris Wilson 191810be98a7SChris Wilson /* Part specific restrictions. 
*/ 191940e1956eSLucas De Marchi if (GRAPHICS_VER(i915) == 11) { 192010be98a7SChris Wilson unsigned int hw_s = hweight8(device->slice_mask); 1921b87d3901SMatt Roper unsigned int hw_ss_per_s = hweight8(dev_subslice_mask); 192210be98a7SChris Wilson unsigned int req_s = hweight8(context->slice_mask); 192310be98a7SChris Wilson unsigned int req_ss = hweight8(context->subslice_mask); 192410be98a7SChris Wilson 192510be98a7SChris Wilson /* 192610be98a7SChris Wilson * Only full subslice enablement is possible if more than one 192710be98a7SChris Wilson * slice is turned on. 192810be98a7SChris Wilson */ 192910be98a7SChris Wilson if (req_s > 1 && req_ss != hw_ss_per_s) 193010be98a7SChris Wilson return -EINVAL; 193110be98a7SChris Wilson 193210be98a7SChris Wilson /* 193310be98a7SChris Wilson * If more than four (SScount bitfield limit) subslices are 193410be98a7SChris Wilson * requested then the number has to be even. 193510be98a7SChris Wilson */ 193610be98a7SChris Wilson if (req_ss > 4 && (req_ss & 1)) 193710be98a7SChris Wilson return -EINVAL; 193810be98a7SChris Wilson 193910be98a7SChris Wilson /* 194010be98a7SChris Wilson * If only one slice is enabled and subslice count is below the 194110be98a7SChris Wilson * device full enablement, it must be at most half of the all 194210be98a7SChris Wilson * available subslices. 194310be98a7SChris Wilson */ 194410be98a7SChris Wilson if (req_s == 1 && req_ss < hw_ss_per_s && 194510be98a7SChris Wilson req_ss > (hw_ss_per_s / 2)) 194610be98a7SChris Wilson return -EINVAL; 194710be98a7SChris Wilson 194810be98a7SChris Wilson /* ABI restriction - VME use case only. */ 194910be98a7SChris Wilson 195010be98a7SChris Wilson /* All slices or one slice only. */ 195110be98a7SChris Wilson if (req_s != 1 && req_s != hw_s) 195210be98a7SChris Wilson return -EINVAL; 195310be98a7SChris Wilson 195410be98a7SChris Wilson /* 195510be98a7SChris Wilson * Half subslices or full enablement only when one slice is 195610be98a7SChris Wilson * enabled. 
		 */
		if (req_s == 1 &&
		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
			return -EINVAL;

		/*
		 * No EU configuration changes: both the requested minimum and
		 * maximum must equal the device maximum, i.e. full EU
		 * enablement per subslice.
		 */
		if ((user->min_eus_per_subslice !=
		     device->max_eus_per_subslice) ||
		    (user->max_eus_per_subslice !=
		     device->max_eus_per_subslice))
			return -EINVAL;
	}

	return 0;
}

/*
 * set_sseu - handler for the I915_CONTEXT_PARAM_SSEU setparam.
 *
 * Copies a drm_i915_gem_context_param_sseu from userspace, resolves the
 * engine it targets and applies the requested slice/subslice/EU
 * configuration to that engine's context state.
 *
 * Returns 0 on success, -EINVAL on malformed input, -ENODEV when the
 * platform (only graphics version 11 is accepted here) or the engine
 * class does not support reconfiguration, -EFAULT on copy failure.
 */
static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	struct intel_sseu sseu;
	unsigned long lookup;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	/* SSEU reconfiguration through this uAPI is gen11 only. */
	if (GRAPHICS_VER(i915) != 11)
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	/* Reserved field must be zero for forward compatibility. */
	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Only render engine supports RPCS configuration. */
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	/* Report back how much of the argument we consumed. */
	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}

/*
 * set_persistence - handler for the I915_CONTEXT_PARAM_PERSISTENCE
 * setparam; args->size must be zero, args->value is the new state.
 */
static int
set_persistence(struct i915_gem_context *ctx,
		const struct drm_i915_gem_context_param *args)
{
	if (args->size)
		return -EINVAL;

	return __context_set_persistence(ctx, args->value);
}

/*
 * set_priority - handler for the I915_CONTEXT_PARAM_PRIORITY setparam.
 *
 * Validates and applies the new scheduling priority, then walks the
 * context's engines: on engines with timeslicing, semaphore usage is
 * enabled only while the context is at or above I915_PRIORITY_NORMAL
 * (and the engine supports semaphores), otherwise cleared.
 */
static int set_priority(struct i915_gem_context *ctx,
			const struct drm_i915_gem_context_param *args)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err;

	err = validate_priority(ctx->i915, args);
	if (err)
		return err;

	ctx->sched.priority = args->value;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_has_timeslices(ce->engine))
			continue;

		if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
		    intel_engine_has_semaphores(ce->engine))
			intel_context_set_use_semaphores(ce);
		else
			intel_context_clear_use_semaphores(ce);
	}
	i915_gem_context_unlock_engines(ctx);

	return 0;
}

/*
 * get_protected - getparam handler reporting whether the context uses
 * protected content.
 */
static int get_protected(struct i915_gem_context *ctx,
			 struct drm_i915_gem_context_param *args)
{
	args->size = 0;
	args->value = i915_gem_context_uses_protected_content(ctx);

	return 0;
}

/*
 * ctx_setparam - dispatch a setparam request against an already
 * finalized context. Parameters not handled below (including those
 * explicitly listed as removed/unsupported) return -EINVAL.
 */
static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM; /* only root may opt out of banning */
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else if (i915_gem_context_uses_protected_content(ctx))
			ret = -EPERM; /* can't clear this for protected contexts */
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!args->value)
			i915_gem_context_clear_recoverable(ctx);
		else if (i915_gem_context_uses_protected_content(ctx))
			ret = -EPERM; /* can't set this for protected contexts */
		else
			i915_gem_context_set_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = set_priority(ctx, args);
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		ret = set_persistence(ctx, args);
		break;

	/* These params cannot be set on a finalized context. */
	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	case I915_CONTEXT_PARAM_VM:
	case I915_CONTEXT_PARAM_ENGINES:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/* State threaded through i915_user_extensions() during context creation. */
struct create_ext {
	struct i915_gem_proto_context *pc;	/* proto-context being built */
	struct drm_i915_file_private *fpriv;	/* creating client */
};

/*
 * create_setparam - I915_CONTEXT_CREATE_EXT_SETPARAM handler: apply a
 * setparam embedded in the create ioctl to the proto-context. The
 * embedded param must not carry a ctx_id (the context does not exist yet).
 */
static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	if (local.param.ctx_id)
		return -EINVAL;

	return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
}

/*
 * invalid_ext - stub for create extensions that are always rejected
 * (see create_extensions[] below, e.g. the CLONE extension).
 */
static int invalid_ext(struct i915_user_extension __user *ext, void *data)
{
	return -EINVAL;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
};

/* True once the client's accumulated ban score reaches the threshold. */
static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

/*
 * __context_lookup - RCU lookup of a finalized context by id.
 *
 * Returns a new reference on the context, or NULL if the id is absent
 * from context_xa or the context's refcount has already dropped to zero
 * (kref_get_unless_zero() failing under rcu_read_lock()).
 */
static inline struct i915_gem_context *
__context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = xa_load(&file_priv->context_xa, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}

/*
 * finalize_create_context_locked - turn a proto-context into a real
 * context under file_priv->proto_context_lock (asserted via lockdep).
 * On success the proto-context is erased from proto_context_xa and
 * closed, and a reference is returned to the caller in addition to the
 * one held by the context xarray.
 */
static struct i915_gem_context *
finalize_create_context_locked(struct drm_i915_file_private *file_priv,
			       struct i915_gem_proto_context *pc, u32 id)
{
	struct i915_gem_context *ctx;
	void *old;

	lockdep_assert_held(&file_priv->proto_context_lock);

	ctx = i915_gem_create_context(file_priv->dev_priv, pc);
	if (IS_ERR(ctx))
		return ctx;

	gem_context_register(ctx, file_priv, id);

	/* The proto-context slot for this id must still have held pc. */
	old = xa_erase(&file_priv->proto_context_xa, id);
	GEM_BUG_ON(old != pc);
	proto_context_close(file_priv->dev_priv, pc);

	/* One for the xarray and one for the caller */
	return i915_gem_context_get(ctx);
}

/*
 * i915_gem_context_lookup - look up a context by id, lazily finalizing
 * a pending proto-context if needed.
 *
 * Fast path is a lockless __context_lookup(); on miss, the lookup is
 * retried under proto_context_lock and, if only a proto-context exists
 * for the id, it is finalized into a real context. Returns a referenced
 * context or ERR_PTR(-ENOENT).
 */
struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;

	ctx = __context_lookup(file_priv, id);
	if (ctx)
		return ctx;

	mutex_lock(&file_priv->proto_context_lock);
	/* Try one more time under the lock */
	ctx = __context_lookup(file_priv, id);
	if (!ctx) {
		pc = xa_load(&file_priv->proto_context_xa, id);
		if (!pc)
			ctx = ERR_PTR(-ENOENT);
		else
			ctx = finalize_create_context_locked(file_priv, pc, id);
	}
	mutex_unlock(&file_priv->proto_context_lock);

	return ctx;
}

/*
 * i915_gem_context_create_ioctl - DRM_IOCTL_I915_GEM_CONTEXT_CREATE(_EXT)
 * handler.
 *
 * Builds a proto-context, applies any create-time extensions, then
 * either finalizes it immediately (graphics version > 12) or registers
 * the proto-context for lazy finalization on first use. On success the
 * new context id is returned in args->ctx_id.
 */
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(to_gt(i915));
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.pc = proto_context_create(i915, args->flags);
	if (IS_ERR(ext_data.pc))
		return PTR_ERR(ext_data.pc);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_pc;
	}

	if (GRAPHICS_VER(i915) > 12) {
		struct i915_gem_context *ctx;

		/* Get ourselves a context ID */
		ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
			       xa_limit_32b, GFP_KERNEL);
		if (ret)
			goto err_pc;

		ctx = i915_gem_create_context(i915, ext_data.pc);
		if (IS_ERR(ctx)) {
			ret = PTR_ERR(ctx);
			goto err_pc;
		}

		proto_context_close(i915, ext_data.pc);
		gem_context_register(ctx, ext_data.fpriv, id);
	} else {
		/* Defer finalization until the context is first used. */
		ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
		if (ret < 0)
			goto err_pc;
	}

	args->ctx_id = id;
	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);

	return 0;

err_pc:
	proto_context_close(i915, ext_data.pc);
	return ret;
}

/*
 * i915_gem_context_destroy_ioctl - DRM_IOCTL_I915_GEM_CONTEXT_DESTROY
 * handler. The id may name either a finalized context or a still
 * pending proto-context; whichever exists is erased and closed.
 */
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	/* id 0 is reserved and never handed out. */
	if (!args->ctx_id)
		return -ENOENT;

	/*
	 * We need to hold the proto-context lock here to prevent races
	 * with finalize_create_context_locked().
	 */
	mutex_lock(&file_priv->proto_context_lock);
	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
	mutex_unlock(&file_priv->proto_context_lock);

	if (!ctx && !pc)
		return -ENOENT;
	/* An id should never be live in both xarrays at once. */
	GEM_WARN_ON(ctx && pc);

	if (pc)
		proto_context_close(file_priv->dev_priv, pc);

	if (ctx)
		context_close(ctx);

	return 0;
}

/*
 * get_sseu - getparam handler for I915_CONTEXT_PARAM_SSEU: reports the
 * engine's current slice/subslice/EU configuration. A zero args->size
 * is a pure size query (only args->size is written back).
 */
static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	/* Reserved field must be zero for forward compatibility. */
	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}

/*
 * i915_gem_context_getparam_ioctl - DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM
 * handler. Looks up (and, if needed, finalizes) the context and reports
 * the requested parameter in args->value/args->size.
 */
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (args->param) {
	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		vm = i915_gem_context_get_eb_vm(ctx);
		args->value = vm->total;
		i915_vm_put(vm);

		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = get_protected(ctx, args);
		break;

	/* Removed or write-only params are not readable. */
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_ENGINES:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

/*
 * i915_gem_context_setparam_ioctl - DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM
 * handler.
 *
 * If the id still names an unfinalized proto-context, the param is
 * applied to it under proto_context_lock; otherwise it is applied to
 * the finalized context via ctx_setparam().
 */
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;
	int ret = 0;

	mutex_lock(&file_priv->proto_context_lock);
	ctx = __context_lookup(file_priv, args->ctx_id);
	if (!ctx) {
		pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
		if (pc) {
			/*
			 * Contexts should be finalized inside
			 * GEM_CONTEXT_CREATE starting with graphics
			 * version 13.
			 */
			WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
			ret = set_proto_ctx_param(file_priv, pc, args);
		} else {
			ret = -ENOENT;
		}
	}
	mutex_unlock(&file_priv->proto_context_lock);

	if (ctx) {
		ret = ctx_setparam(file_priv, ctx, args);
		i915_gem_context_put(ctx);
	}

	return ret;
}

/*
 * i915_gem_context_reset_stats_ioctl - DRM_IOCTL_I915_GET_RESET_STATS
 * handler: reports global reset count (root only) and this context's
 * guilty/pending hang counters.
 */
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;

	if (args->flags || args->pad)
		return -EINVAL;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * We opt for unserialised reads here.
	 * This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	/* The global reset count is privileged information. */
	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	i915_gem_context_put(ctx);
	return 0;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	/* No engines attached to the iterator: iteration is empty. */
	if (unlikely(!e))
		return NULL;

	/* Skip over unpopulated slots until the next engine or the end. */
	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

/* Module teardown: release the LUT handle slab cache. */
void i915_gem_context_module_exit(void)
{
	kmem_cache_destroy(slab_luts);
}

/* Module init: create the slab cache backing i915_lut_handle allocations. */
int __init i915_gem_context_module_init(void)
{
	slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!slab_luts)
		return -ENOMEM;

	return 0;
}