/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * from RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU.
The GPU has loaded its state already and has stored away the gtt
 * offset of the BO. The GPU is not actively referencing the data at this
 * offset, but it will on the next context switch. The only way to avoid this
 * is to do a GPU reset.
 *
 * An "active context" is one which was previously the "current context" and is
 * on the active list waiting for the next context switch to occur. Until this
 * happens, the object must remain at the same gtt offset. It is therefore
 * possible to destroy a context, but it is still active.
 *
 */

#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/nospec.h>

#include <drm/drm_cache.h>
#include <drm/drm_syncobj.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"

#include "pxp/intel_pxp.h"

#include "i915_file_private.h"
#include "i915_gem_context.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1

/* Slab cache for i915_lut_handle objects (handle -> vma lookup entries). */
static struct kmem_cache *slab_luts;

/* Allocate one handle->vma lookup entry from the dedicated slab cache. */
struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(slab_luts, GFP_KERNEL);
}

/* Return a lookup entry previously obtained from i915_lut_handle_alloc(). */
void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	return kmem_cache_free(slab_luts, lut);
}

/*
 * Tear down the handle->vma lookup table of @ctx: for every vma still
 * published in the radix tree, unlink the LUT entry belonging to this
 * context from its object's lut_list, delete the tree slot, and drop
 * the references the table held on the vma and its object.
 */
static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	mutex_lock(&ctx->lut_mutex);
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		/* Skip objects whose last reference is already being dropped. */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_lock(&obj->lut_lock);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			/* Match on both owning context and user handle. */
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);

		/*
		 * list_for_each_entry() terminated at the list head iff no
		 * entry matched; only a real match means the table owned
		 * references to release.
		 */
		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			i915_vma_close(vma);
			i915_gem_object_put(obj);
		}

		/* Balance the kref_get_unless_zero() above. */
		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
	mutex_unlock(&ctx->lut_mutex);
}

/*
 * Resolve a userspace engine specifier into an intel_context of @ctx.
 * With LOOKUP_USER_INDEX the specifier is an index into the context's
 * user-configured engine map; otherwise it is a class/instance pair
 * translated through the legacy engine layout. Mixing the two modes
 * is rejected with -EINVAL.
 */
static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	/* Index-based lookup is only valid on a user-supplied engine map. */
	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}

/*
 * Validate a user-requested scheduling priority: capability support,
 * numeric range, and CAP_SYS_NICE for raising above the default.
 */
static int validate_priority(struct drm_i915_private *i915,
			     const struct drm_i915_gem_context_param *args)
{
	s64 priority = args->value;

	if (args->size)
		return -EINVAL;

	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
		return -ENODEV;

	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
		return -EINVAL;

	/* Boosting above the default priority requires CAP_SYS_NICE. */
	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}

/*
 * Free a proto-context and everything it owns: the PXP runtime-pm
 * wakeref (if taken), the VM reference, and the user engine array
 * including each entry's siblings array.
 */
static void proto_context_close(struct drm_i915_private *i915,
				struct i915_gem_proto_context *pc)
{
	int i;

	if (pc->pxp_wakeref)
		intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
	if (pc->vm)
		i915_vm_put(pc->vm);
	if (pc->user_engines) {
		for (i = 0; i < pc->num_user_engines; i++)
			kfree(pc->user_engines[i].siblings);
		kfree(pc->user_engines);
	}
	kfree(pc);
}

/*
 * Set or clear UCONTEXT_PERSISTENCE on a proto-context, checking that
 * the hardware/driver can actually honour the requested mode.
 */
static int proto_context_set_persistence(struct drm_i915_private *i915,
					 struct i915_gem_proto_context *pc,
					 bool persist)
{
	if (persist) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!i915->params.enable_hangcheck)
			return -EINVAL;

		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(i915)))
			return -ENODEV;

		pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
	}

	return 0;
}

/*
 * Enable or disable protected (PXP) content for a proto-context.
 * Protected contexts must be bannable and non-recoverable, and take a
 * runtime-pm wakeref to keep the PXP session alive; the wakeref is
 * released by proto_context_close().
 */
static int proto_context_set_protected(struct drm_i915_private *i915,
				       struct i915_gem_proto_context *pc,
				       bool protected)
{
	int ret = 0;

	if (!protected) {
		pc->uses_protected_content = false;
	} else if (!intel_pxp_is_enabled(i915->pxp)) {
		ret = -ENODEV;
	} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
		   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
		ret = -EPERM;
	} else {
		pc->uses_protected_content = true;

		/*
		 * protected context usage requires the PXP session to be up,
		 * which in turn requires the device to be active.
		 */
		pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);

		if (!intel_pxp_is_active(i915->pxp))
			ret = intel_pxp_start(i915->pxp);
	}

	return ret;
}

/*
 * Allocate a proto-context with the default flags (bannable,
 * recoverable, and persistent when hangcheck is enabled), honouring
 * the uAPI creation @flags.
 */
static struct i915_gem_proto_context *
proto_context_create(struct drm_i915_private *i915, unsigned int flags)
{
	struct i915_gem_proto_context *pc, *err;

	pc = kzalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return ERR_PTR(-ENOMEM);

	/* -1 == "no user engine map configured yet". */
	pc->num_user_engines = -1;
	pc->user_engines = NULL;
	pc->user_flags = BIT(UCONTEXT_BANNABLE) |
			 BIT(UCONTEXT_RECOVERABLE);
	if (i915->params.enable_hangcheck)
		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
	pc->sched.priority = I915_PRIORITY_NORMAL;

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
		if (!HAS_EXECLISTS(i915)) {
			err = ERR_PTR(-EINVAL);
			goto proto_close;
		}
		pc->single_timeline = true;
	}

	return pc;

proto_close:
	proto_context_close(i915, pc);
	return err;
}

/*
 * Reserve a context ID for @pc and publish it. A NULL placeholder is
 * allocated in context_xa first so the ID is valid from userspace's
 * point of view; the proto-context itself lives in proto_context_xa
 * until it is finalized into a real context.
 */
static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
					 struct i915_gem_proto_context *pc,
					 u32 *id)
{
	int ret;
	void *old;

	lockdep_assert_held(&fpriv->proto_context_lock);

	ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
	if (xa_is_err(old)) {
		/* Roll back the ID reservation on failure. */
		xa_erase(&fpriv->context_xa, *id);
		return xa_err(old);
	}
	WARN_ON(old);

	return 0;
}

/* Locked wrapper around proto_context_register_locked(). */
static int proto_context_register(struct drm_i915_file_private *fpriv,
				  struct i915_gem_proto_context *pc,
				  u32 *id)
{
	int ret;

	mutex_lock(&fpriv->proto_context_lock);
	ret = proto_context_register_locked(fpriv, pc, id);
	mutex_unlock(&fpriv->proto_context_lock);

	return ret;
}

/*
 * Look up a VM by user handle under the xarray lock, returning a new
 * reference or NULL if the handle is unknown.
 */
static struct i915_address_space *
i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_address_space *vm;

	xa_lock(&file_priv->vm_xa);
	vm = xa_load(&file_priv->vm_xa, id);
	if (vm)
		kref_get(&vm->ref);
	xa_unlock(&file_priv->vm_xa);

	return vm;
}

/*
 * CONTEXT_PARAM_VM on a proto-context: attach the VM identified by
 * args->value, replacing (and releasing) any VM set previously.
 */
static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
			    struct i915_gem_proto_context *pc,
			    const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->dev_priv;
	struct i915_address_space *vm;

	if (args->size)
		return -EINVAL;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	/* VM handles are 32-bit; a wider value cannot name one. */
	if (upper_32_bits(args->value))
		return -ENOENT;

	vm = i915_gem_vm_lookup(fpriv, args->value);
	if (!vm)
		return -ENOENT;

	if (pc->vm)
		i915_vm_put(pc->vm);
	pc->vm = vm;

	return 0;
}

/* Shared cursor for the CONTEXT_PARAM_ENGINES extension handlers below. */
struct set_proto_ctx_engines {
	struct drm_i915_private *i915;
	unsigned num_engines;
	struct i915_gem_proto_engine *engines;
};

/*
 * I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE: populate one engine-map slot
 * with either a single physical engine (one sibling) or a balanced
 * virtual engine built from the user-supplied sibling list.
 */
static int
set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
			      void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct intel_engine_cs **siblings;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	/* Clamp the user index against speculative out-of-bounds access. */
	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	if (num_siblings == 0)
		return 0;

	siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
	if (!siblings)
		return -ENOMEM;

	/* Resolve every user-specified sibling to a physical engine. */
	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto err_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto err_siblings;
		}
	}

	if (num_siblings == 1) {
		/* A single sibling degenerates to a plain physical engine. */
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
		set->engines[idx].engine = siblings[0];
		kfree(siblings);
	} else {
		/* Ownership of the siblings array passes to the slot. */
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
		set->engines[idx].num_siblings = num_siblings;
		set->engines[idx].siblings = siblings;
	}

	return 0;

err_siblings:
	kfree(siblings);

	return err;
}

/*
 * I915_CONTEXT_ENGINES_EXT_BOND: validate a bonding description for a
 * previously-configured physical engine slot. Only supported on
 * pre-gen12 platforms (plus TGL/RKL/ADL-S) and execlists submission.
 */
static int
set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
	    !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
		drm_dbg(&i915->drm,
			"Bonding not supported on this platform\n");
		return -ENODEV;
	}

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	/* Clamp the user index against speculative out-of-bounds access. */
	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}

	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
		drm_dbg(&i915->drm,
			"Bonding with virtual engines not allowed\n");
		return -EINVAL;
	}

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(i915,
					  ci.engine_class,
					  ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (intel_engine_uses_guc(master)) {
		drm_dbg(&i915->drm, "bonding extension not supported with GuC submission");
		return -ENODEV;
	}

	if (get_user(num_bonds, &ext->num_bonds))
		return -EFAULT;

	/* Validate each bond target; no state is recorded here. */
	for (n = 0; n < num_bonds; n++) {
		struct intel_engine_cs *bond;

		if
 (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			drm_dbg(&i915->drm,
				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
				n, ci.engine_class, ci.engine_instance);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT: configure one engine-map
 * slot as a parallel engine of @width contexts, each with
 * @num_siblings placement candidates. The flattened (width x
 * num_siblings) sibling array is validated for matching engine class
 * and contiguous logical masks before ownership passes to the slot.
 */
static int
set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
				      void *data)
{
	struct i915_context_engines_parallel_submit __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct i915_engine_class_instance prev_engine;
	u64 flags;
	int err = 0, n, i, j;
	u16 slot, width, num_siblings;
	struct intel_engine_cs **siblings = NULL;
	intel_engine_mask_t prev_mask;

	if (get_user(slot, &ext->engine_index))
		return -EFAULT;

	if (get_user(width, &ext->width))
		return -EFAULT;

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	/* Execlists cannot load-balance a parallel engine. */
	if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
	    num_siblings != 1) {
		drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
			num_siblings);
		return -EINVAL;
	}

	if (slot >= set->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			slot, set->num_engines);
		return -EINVAL;
	}

	if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", slot);
		return -EINVAL;
	}

	if (get_user(flags, &ext->flags))
		return -EFAULT;

	if (flags) {
		drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
		return -EINVAL;
	}

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (width < 2) {
		drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
		return -EINVAL;
	}

	if (num_siblings < 1) {
		drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
			num_siblings);
		return -EINVAL;
	}

	/*
	 * NOTE(review): num_siblings * width multiplies two u16s in int
	 * arithmetic before kmalloc_array() sees it — presumed bounded
	 * well below overflow by the engine counts; confirm.
	 */
	siblings = kmalloc_array(num_siblings * width,
				 sizeof(*siblings),
				 GFP_KERNEL);
	if (!siblings)
		return -ENOMEM;

	/* Create contexts / engines */
	for (i = 0; i < width; ++i) {
		intel_engine_mask_t current_mask = 0;

		for (j = 0; j < num_siblings; ++j) {
			struct i915_engine_class_instance ci;

			n = i * num_siblings + j;
			if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
				err = -EFAULT;
				goto out_err;
			}

			siblings[n] =
				intel_engine_lookup_user(i915, ci.engine_class,
							 ci.engine_instance);
			if (!siblings[n]) {
				drm_dbg(&i915->drm,
					"Invalid sibling[%d]: { class:%d, inst:%d }\n",
					n, ci.engine_class, ci.engine_instance);
				err = -EINVAL;
				goto out_err;
			}

			/*
			 * We don't support breadcrumb handshake on these
			 * classes
			 */
			if (siblings[n]->class == RENDER_CLASS ||
			    siblings[n]->class == COMPUTE_CLASS) {
				err = -EINVAL;
				goto out_err;
			}

			/* All engines in the set must share one class. */
			if (n) {
				if (prev_engine.engine_class !=
				    ci.engine_class) {
					drm_dbg(&i915->drm,
						"Mismatched class %d, %d\n",
						prev_engine.engine_class,
						ci.engine_class);
					err = -EINVAL;
					goto out_err;
				}
			}

			prev_engine = ci;
			current_mask |= siblings[n]->logical_mask;
		}

		/* Each row's logical mask must follow the previous row's. */
		if (i > 0) {
			if (current_mask != prev_mask << 1) {
				drm_dbg(&i915->drm,
					"Non contiguous logical mask 0x%x, 0x%x\n",
					prev_mask, current_mask);
				err = -EINVAL;
				goto out_err;
			}
		}
		prev_mask = current_mask;
	}

	/* Ownership of the siblings array passes to the slot. */
	set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
	set->engines[slot].num_siblings = num_siblings;
	set->engines[slot].width = width;
	set->engines[slot].siblings = siblings;

	return 0;

out_err:
	kfree(siblings);

	return err;
}

static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] =
		set_proto_ctx_engines_bond,
	[I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
		set_proto_ctx_engines_parallel_submit,
};

/*
 * Parse the I915_CONTEXT_PARAM_ENGINES uAPI array into the proto-context's
 * user-engine map. May only be done once per proto-context; on success
 * pc->user_engines owns the allocated array.
 */
static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
				 struct i915_gem_proto_context *pc,
				 const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->dev_priv;
	struct set_proto_ctx_engines set = { .i915 = i915 };
	struct i915_context_param_engines __user *user =
		u64_to_user_ptr(args->value);
	unsigned int n;
	u64 extensions;
	int err;

	if (pc->num_user_engines >= 0) {
		drm_dbg(&i915->drm, "Cannot set engines twice");
		return -EINVAL;
	}

	/* args->size must be the header plus a whole number of engine slots. */
	if (args->size < sizeof(*user) ||
	    !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
			args->size);
		return -EINVAL;
	}

	set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
	/* RING_MASK has no shift so we can use it directly here */
	if (set.num_engines > I915_EXEC_RING_MASK + 1)
		return -EINVAL;

	set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
	if (!set.engines)
		return -ENOMEM;

	for (n = 0; n < set.num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			kfree(set.engines);
			return -EFAULT;
		}

		memset(&set.engines[n], 0, sizeof(set.engines[n]));

		/*
		 * The INVALID class/instance sentinel deliberately leaves this
		 * slot unconfigured (type stays zeroed from the memset above).
		 */
		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
			continue;

		engine = intel_engine_lookup_user(i915,
						  ci.engine_class,
						  ci.engine_instance);
		if (!engine) {
			drm_dbg(&i915->drm,
				"Invalid engine[%d]: { class:%d, instance:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			kfree(set.engines);
			return -ENOENT;
		}

		set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
		set.engines[n].engine = engine;
	}

	/* -EFAULT unless get_user succeeds; then the extension chain decides. */
	err = -EFAULT;
	if (!get_user(extensions, &user->extensions))
		err = i915_user_extensions(u64_to_user_ptr(extensions),
					   set_proto_ctx_engines_extensions,
					   ARRAY_SIZE(set_proto_ctx_engines_extensions),
					   &set);
	if (err) {
		kfree(set.engines);
		return err;
	}

	pc->num_user_engines = set.num_engines;
	pc->user_engines = set.engines;

	return 0;
}

/*
 * Apply an I915_CONTEXT_PARAM_SSEU request to the proto-context: either to a
 * specific user-engine slot (when an engine map is set) or to the legacy RCS
 * slot. Only the render class accepts RPCS reconfiguration.
 */
static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
			      struct i915_gem_proto_context *pc,
			      struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->dev_priv;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_sseu *sseu;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	/* SSEU configuration here is only supported on graphics version 11. */
	if (GRAPHICS_VER(i915) != 11)
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	/* ENGINE_INDEX addressing is valid iff a user engine map exists. */
	if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
		return -EINVAL;

	if (pc->num_user_engines >= 0) {
		int idx =
			user_sseu.engine.engine_instance;
		struct i915_gem_proto_engine *pe;

		if (idx >= pc->num_user_engines)
			return -EINVAL;

		pe = &pc->user_engines[idx];

		/* Only render engine supports RPCS configuration. */
		if (pe->engine->class != RENDER_CLASS)
			return -EINVAL;

		sseu = &pe->sseu;
	} else {
		/* Only render engine supports RPCS configuration. */
		if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
			return -EINVAL;

		/* There is only one render engine */
		if (user_sseu.engine.engine_instance != 0)
			return -EINVAL;

		sseu = &pc->legacy_rcs_sseu;
	}

	ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
	if (ret)
		return ret;

	args->size = sizeof(user_sseu);

	return 0;
}

/*
 * Dispatch a single SET_CONTEXT_PARAM request against a proto-context
 * (i.e. before the real context is created). Returns 0 or a negative
 * errno; unknown/retired params yield -EINVAL.
 */
static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
			       struct i915_gem_proto_context *pc,
			       struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
		else
			pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		/*
		 * Clearing BANNABLE needs CAP_SYS_ADMIN and is refused for
		 * protected-content contexts.
		 */
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			pc->user_flags |= BIT(UCONTEXT_BANNABLE);
		else if (pc->uses_protected_content)
			ret = -EPERM;
		else
			pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		/* Protected-content contexts may not be marked recoverable. */
		if (args->size)
			ret = -EINVAL;
		else if (!args->value)
			pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
		else if (pc->uses_protected_content)
			ret = -EPERM;
		else
			pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = validate_priority(fpriv->dev_priv, args);
		if (!ret)
			pc->sched.priority = args->value;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_proto_ctx_sseu(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_proto_ctx_vm(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_proto_ctx_engines(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		if (args->size)
			ret = -EINVAL;
		else
			ret = proto_context_set_persistence(fpriv->dev_priv, pc,
							    args->value);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = proto_context_set_protected(fpriv->dev_priv, pc,
						  args->value);
		break;

	/* Retired uAPI params: rejected, kept listed for documentation. */
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * Bind a freshly created intel_context to its GEM context: address space,
 * ring size, scheduling hints, watchdog and (optionally) an SSEU config.
 */
static int intel_context_set_gem(struct intel_context *ce,
				 struct i915_gem_context *ctx,
				 struct intel_sseu sseu)
{
	int ret = 0;
	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);

	/* Ring size must be set before the context is ever pinned. */
	GEM_BUG_ON(intel_context_is_pinned(ce));
	ce->ring_size = SZ_16K;

	/* Swap the context's default vm for the GEM context's execbuf vm. */
	i915_vm_put(ce->vm);
	ce->vm = i915_gem_context_get_eb_vm(ctx);

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_timeslices(ce->engine) &&
	    intel_engine_has_semaphores(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);

	if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
	    ctx->i915->params.request_timeout_ms) {
		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;

		/* Module parameter is in ms, watchdog API takes us. */
		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
	}

	/* A valid SSEU has no zero fields */
	if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
		ret = intel_context_reconfigure_sseu(ce, sseu);

	return ret;
}

/* Drop the perma-pins taken by perma_pin_contexts() on parallel engines. */
static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		struct intel_context *ce = e->engines[count], *child;

		if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
			continue;

		for_each_child(ce, child)
			intel_context_unpin(child);
		intel_context_unpin(ce);
	}
}

static void unpin_engines(struct i915_gem_engines *e)
{
	__unpin_engines(e, e->num_engines);
}

/* Release the first @count context references, then the array itself. */
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		if (!e->engines[count])
			continue;

		intel_context_put(e->engines[count]);
	}
	kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}

/* RCU callback: final teardown of an engines array after a grace period. */
static void free_engines_rcu(struct rcu_head *rcu)
{
	struct i915_gem_engines *engines =
		container_of(rcu, struct i915_gem_engines, rcu);

	i915_sw_fence_fini(&engines->fence);
	free_engines(engines);
}

static void accumulate_runtime(struct i915_drm_client *client,
			       struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	if (!client)
		return;

	/* Transfer accumulated runtime to the parent GEM context. */
	for_each_gem_engine(ce, engines, it) {
		unsigned int class = ce->engine->uabi_class;

		GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
		atomic64_add(intel_context_get_total_runtime_ns(ce),
			     &client->past_runtime[class]);
	}
}

/*
 * sw-fence notifier for an engines array: on COMPLETE, unlink from the
 * context's stale list, fold runtimes into the client and drop the ctx
 * reference; on FREE, defer the actual free past an RCU grace period.
 */
static int
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_gem_engines *engines =
		container_of(fence, typeof(*engines), fence);
	struct i915_gem_context *ctx = engines->ctx;

	switch (state) {
	case FENCE_COMPLETE:
		if (!list_empty(&engines->link)) {
			unsigned long flags;

			spin_lock_irqsave(&ctx->stale.lock, flags);
			list_del(&engines->link);
			spin_unlock_irqrestore(&ctx->stale.lock, flags);
		}
		accumulate_runtime(ctx->client, engines);
		i915_gem_context_put(ctx);

		break;

	case FENCE_FREE:
		init_rcu_head(&engines->rcu);
		call_rcu(&engines->rcu, free_engines_rcu);
		break;
	}

	return NOTIFY_DONE;
}

static struct i915_gem_engines
*alloc_engines(unsigned int count)
{
	struct i915_gem_engines *e;

	/* Zeroed so unused slots read as NULL and num_engines starts at 0. */
	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
	if (!e)
		return NULL;

	i915_sw_fence_init(&e->fence, engines_notify);
	return e;
}

/*
 * Build the legacy (no user engine map) engines array for a new context,
 * placing each uabi engine at its fixed legacy_idx slot and applying
 * @rcs_sseu to the render engine.
 */
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
						struct intel_sseu rcs_sseu)
{
	const unsigned int max = I915_NUM_ENGINES;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e, *err;

	e = alloc_engines(max);
	if (!e)
		return ERR_PTR(-ENOMEM);

	for_each_uabi_engine(engine, ctx->i915) {
		struct intel_context *ce;
		struct intel_sseu sseu = {};
		int ret;

		/* Engines without a legacy execbuf slot are skipped here. */
		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= max);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[engine->legacy_idx] = ce;
		/* num_engines tracks the highest populated slot + 1. */
		e->num_engines = max(e->num_engines, engine->legacy_idx + 1);

		if (engine->class == RENDER_CLASS)
			sseu = rcs_sseu;

		ret = intel_context_set_gem(ce, ctx, sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}

	}

	return e;

free_engines:
	free_engines(e);
	return err;
}

/*
 * Pin a parallel parent context and all of its children for the lifetime
 * of the engines array (see CONTEXT_PERMA_PIN); unwinds all pins on error.
 */
static int perma_pin_contexts(struct intel_context *ce)
{
	struct intel_context *child;
	int i = 0, j = 0, ret;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	ret = intel_context_pin(ce);
	if (unlikely(ret))
		return ret;

	for_each_child(ce, child) {
		ret = intel_context_pin(child);
		if (unlikely(ret))
			goto unwind;
		++i;
	}

	set_bit(CONTEXT_PERMA_PIN, &ce->flags);

	return 0;

unwind:
	intel_context_unpin(ce);
	/* Only unpin the first i children — the ones that pinned above. */
	for_each_child(ce, child) {
		if (j++ < i)
			intel_context_unpin(child);
		else
			break;
	}

	return ret;
}

/*
 * Materialise the proto-context's user engine map into a live engines
 * array: physical, balanced (virtual) and parallel slots each get their
 * own intel_context, bound to @ctx. Parallel slots are perma-pinned after
 * binding (see comment below on ordering).
 */
static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
					     unsigned int num_engines,
					     struct i915_gem_proto_engine *pe)
{
	struct i915_gem_engines *e, *err;
	unsigned int n;

	e = alloc_engines(num_engines);
	if (!e)
		return ERR_PTR(-ENOMEM);
	e->num_engines = num_engines;

	for (n = 0; n < num_engines; n++) {
		struct intel_context *ce, *child;
		int ret;

		switch (pe[n].type) {
		case I915_GEM_ENGINE_TYPE_PHYSICAL:
			ce = intel_context_create(pe[n].engine);
			break;

		case I915_GEM_ENGINE_TYPE_BALANCED:
			ce = intel_engine_create_virtual(pe[n].siblings,
							 pe[n].num_siblings, 0);
			break;

		case I915_GEM_ENGINE_TYPE_PARALLEL:
			ce = intel_engine_create_parallel(pe[n].siblings,
							  pe[n].num_siblings,
							  pe[n].width);
			break;

		case I915_GEM_ENGINE_TYPE_INVALID:
		default:
			GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
			continue;
		}

		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[n] = ce;

		/*
		 * NOTE(review): this passes pe->sseu (slot 0's sseu), not
		 * pe[n].sseu — looks like it applies the first slot's SSEU
		 * to every engine; verify whether that is intentional.
		 */
		ret = intel_context_set_gem(ce, ctx, pe->sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}
		for_each_child(ce, child) {
			ret = intel_context_set_gem(child, ctx, pe->sseu);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}

		/*
		 * XXX: Must be done after calling intel_context_set_gem as that
		 * function changes the ring size. The ring is allocated when
		 * the context is pinned. If the ring size is changed after
		 * allocation we have a mismatch of the ring size and will cause
		 * the context to hang. Presumably with a bit of reordering we
		 * could move the perma-pin step to the backend function
		 * intel_engine_create_parallel.
		 */
		if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
			ret = perma_pin_contexts(ce);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}
	}

	return e;

free_engines:
	free_engines(e);
	return err;
}

/*
 * Deferred context destruction, run from the i915 workqueue once the last
 * kref is dropped (see i915_gem_context_release). Releases everything the
 * context still owns: list linkage, syncobj, vm, pxp wakeref, client.
 */
static void i915_gem_context_release_work(struct work_struct *work)
{
	struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
						    release_work);
	struct i915_address_space *vm;

	trace_i915_context_free(ctx);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	if (ctx->syncobj)
		drm_syncobj_put(ctx->syncobj);

	vm = ctx->vm;
	if (vm)
		i915_vm_put(vm);

	if (ctx->pxp_wakeref)
		intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);

	if (ctx->client)
		i915_drm_client_put(ctx->client);
	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);

	/* The ctx struct itself goes after an RCU grace period. */
	kfree_rcu(ctx, rcu);
}

/* kref release: defer the real teardown to the driver workqueue. */
void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);

	queue_work(ctx->i915->wq, &ctx->release_work);
}

static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
	/* Caller guarantees exclusivity; skip the RCU lockdep check. */
	return rcu_dereference_protected(ctx->engines, true);
}

static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}

static bool __cancel_engine(struct intel_engine_cs *engine)
{
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset). As we have marked our context
	 * as banned, any incomplete request, including any running, will
	 * be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fallback to doing a local reset
	 * instead.
	 */
	return intel_engine_pulse(engine) == 0;
}

/*
 * Find the physical engine (if any) on which @ce currently has a request
 * executing, walking the timeline newest-first. Returns NULL when idle.
 */
static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	if (intel_context_has_inflight(ce))
		return intel_context_inflight(ce);

	if (!ce->timeline)
		return NULL;

	/*
	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
	 * to the request to prevent it being transferred to a new timeline
	 * (and onto a new timeline->requests list).
	 */
	rcu_read_lock();
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		bool found;

		/* timeline is already completed upto this point? */
		if (!i915_request_get_rcu(rq))
			break;

		/* Check with the backend if the request is inflight */
		found = true;
		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
			found = i915_request_active_engine(rq, &engine);

		i915_request_put(rq);
		if (found)
			break;
	}
	rcu_read_unlock();

	return engine;
}

static void
kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may be have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
	for_each_gem_engine(ce, engines, it) {
		struct intel_engine_cs *engine;

		if ((exit || !persistent) && intel_context_revoke(ce))
			continue; /* Already marked.
 */

		/*
		 * Check the current active state of this context; if we
		 * are currently executing on the GPU we need to evict
		 * ourselves. On the other hand, if we haven't yet been
		 * submitted to the GPU or if everything is complete,
		 * we have nothing to do.
		 */
		engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
		if (engine && !__cancel_engine(engine) && (exit || !persistent))
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a full
			 * reset. We hope the collateral damage is worth it.
			 */
			__reset_context(engines->ctx, engine);
	}
}

/*
 * Cancel outstanding work on every stale engine set of a now-closed context.
 * Each set is held alive across the (lock-dropping) kill_engines() call by
 * taking an await on its fence; entries whose fence could not be awaited are
 * already completing and are simply unlinked.
 */
static void kill_context(struct i915_gem_context *ctx)
{
	struct i915_gem_engines *pos, *next;

	spin_lock_irq(&ctx->stale.lock);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
		if (!i915_sw_fence_await(&pos->fence)) {
			list_del_init(&pos->link);
			continue;
		}

		/* kill_engines() may sleep/reset; cannot hold the spinlock */
		spin_unlock_irq(&ctx->stale.lock);

		kill_engines(pos, !ctx->i915->params.enable_hangcheck,
			     i915_gem_context_is_persistent(ctx));

		spin_lock_irq(&ctx->stale.lock);
		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
		list_safe_reset_next(pos, next, link);
		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */

		i915_sw_fence_complete(&pos->fence);
	}
	spin_unlock_irq(&ctx->stale.lock);
}

/*
 * Retire an engine set: close each context, keep the set tracked on
 * ctx->stale.engines until all its contexts are finally scheduled out, and
 * fall back to kill_engines() if we raced with the context already closing
 * (or if awaiting a context's active tracker failed).
 */
static void engines_idle_release(struct i915_gem_context *ctx,
				 struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	INIT_LIST_HEAD(&engines->link);

	engines->ctx = i915_gem_context_get(ctx);

	for_each_gem_engine(ce, engines, it) {
		int err;

		/* serialises with execbuf */
		intel_context_close(ce);
		if (!intel_context_pin_if_active(ce))
			continue;

		/* Wait until context is finally scheduled out and retired */
		err = i915_sw_fence_await_active(&engines->fence,
						 &ce->active,
						 I915_ACTIVE_AWAIT_BARRIER);
		intel_context_unpin(ce);
		if (err)
			goto kill;
	}

	spin_lock_irq(&ctx->stale.lock);
	if (!i915_gem_context_is_closed(ctx))
		list_add_tail(&engines->link, &ctx->stale.engines);
	spin_unlock_irq(&ctx->stale.lock);

kill:
	if (list_empty(&engines->link)) /* raced, already closed */
		kill_engines(engines, true,
			     i915_gem_context_is_persistent(ctx));

	i915_sw_fence_commit(&engines->fence);
}

static void set_closed_name(struct i915_gem_context *ctx)
{
	char *s;

	/* Replace '[]' with '<>' to indicate closed in debug prints */

	s = strrchr(ctx->name, '[');
	if (!s)
		return;

	*s = '<';

	s = strchr(s + 1, ']');
	if (s)
		*s = '>';
}

/*
 * Detach a context from its file: revoke the engines, mark it closed,
 * clear the handle LUT and client linkage, and (below) forcibly kill any
 * remaining non-persistent work before dropping the file's reference.
 */
static void context_close(struct i915_gem_context *ctx)
{
	struct i915_drm_client *client;

	/* Flush any concurrent set_engines() */
	mutex_lock(&ctx->engines_mutex);
	unpin_engines(__context_engines_static(ctx));
	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
	i915_gem_context_set_closed(ctx);
	mutex_unlock(&ctx->engines_mutex);

	mutex_lock(&ctx->mutex);

	set_closed_name(ctx);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);

	/* Poison file_priv so late lookups via the context fail cleanly */
	ctx->file_priv = ERR_PTR(-EBADF);

	client = ctx->client;
	if (client) {
		spin_lock(&client->ctx_lock);
		list_del_rcu(&ctx->client_link);
		spin_unlock(&client->ctx_lock);
	}

	mutex_unlock(&ctx->mutex);

	/*
	 * If the user has disabled hangchecking, we can not be sure that
	 * the batches will ever complete after the context is closed,
	 * keeping the context and all resources pinned forever. So in this
	 * case we opt to forcibly kill off all remaining requests on
	 * context close.
	 */
	kill_context(ctx);

	i915_gem_context_put(ctx);
}

/*
 * Flip the persistence flag on a context, enforcing the hardware/driver
 * preconditions for each direction (see the comments below).  Returns 0 on
 * success, -EINVAL/-ENODEV when the requested mode cannot be supported.
 */
static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
{
	if (i915_gem_context_is_persistent(ctx) == state)
		return 0;

	if (state) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!ctx->i915->params.enable_hangcheck)
			return -EINVAL;

		i915_gem_context_set_persistence(ctx);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(ctx->i915)))
			return -ENODEV;

		i915_gem_context_clear_persistence(ctx);
	}

	return 0;
}

/*
 * Materialise a proto-context into a real i915_gem_context: allocate the
 * context, take (or create) its address space, build the engine set, and
 * apply the proto-context's flags.  Returns an ERR_PTR on failure.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *i915,
			const struct i915_gem_proto_context *pc)
{
	struct i915_gem_context *ctx;
	struct i915_address_space *vm = NULL;
	struct i915_gem_engines *e;
	int err;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->i915 = i915;
	ctx->sched = pc->sched;
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->link);
	INIT_WORK(&ctx->release_work, i915_gem_context_release_work);

	spin_lock_init(&ctx->stale.lock);
	INIT_LIST_HEAD(&ctx->stale.engines);

	if (pc->vm) {
		/* User supplied an explicit VM; share a reference to it */
		vm = i915_vm_get(pc->vm);
	} else if (HAS_FULL_PPGTT(i915)) {
		struct i915_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(to_gt(i915), 0);
		if (IS_ERR(ppgtt)) {
			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
				PTR_ERR(ppgtt));
			err = PTR_ERR(ppgtt);
			goto err_ctx;
		}
		vm = &ppgtt->vm;
	}
	if (vm)
		ctx->vm = vm;

	mutex_init(&ctx->engines_mutex);
	if (pc->num_user_engines >= 0) {
		/* Engine map explicitly configured via the context-param uAPI */
		i915_gem_context_set_user_engines(ctx);
		e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
	} else {
		i915_gem_context_clear_user_engines(ctx);
		e = default_engines(ctx, pc->legacy_rcs_sseu);
	}
	if (IS_ERR(e)) {
		err = PTR_ERR(e);
		goto err_vm;
	}
	RCU_INIT_POINTER(ctx->engines, e);

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	mutex_init(&ctx->lut_mutex);

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(i915);

	ctx->user_flags = pc->user_flags;

	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

	if (pc->single_timeline) {
		err = drm_syncobj_create(&ctx->syncobj,
					 DRM_SYNCOBJ_CREATE_SIGNALED,
					 NULL);
		if (err)
			goto err_engines;
	}

	if (pc->uses_protected_content) {
		/* Hold a runtime-pm wakeref for the context's PXP usage */
		ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
		ctx->uses_protected_content = true;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_engines:
	free_engines(e);
err_vm:
	if (ctx->vm)
		i915_vm_put(ctx->vm);
err_ctx:
	kfree(ctx);
	return ERR_PTR(err);
}

/* Initialise the per-device context bookkeeping (list + lock). */
static void init_contexts(struct i915_gem_contexts *gc)
{
	spin_lock_init(&gc->lock);
	INIT_LIST_HEAD(&gc->list);
}

void i915_gem_init__contexts(struct drm_i915_private *i915)
{
	init_contexts(&i915->gem.contexts);
}

/*
 * Note that this implicitly consumes the ctx reference, by placing
 * the ctx in the context_xa.
 */
static void gem_context_register(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *fpriv,
				 u32 id)
{
	struct drm_i915_private *i915 = ctx->i915;
	void *old;

	ctx->file_priv = fpriv;

	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	ctx->client = i915_drm_client_get(fpriv->client);

	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
		 current->comm, pid_nr(ctx->pid));

	spin_lock(&ctx->client->ctx_lock);
	list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
	spin_unlock(&ctx->client->ctx_lock);

	spin_lock(&i915->gem.contexts.lock);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	spin_unlock(&i915->gem.contexts.lock);

	/* And finally expose ourselves to userspace via the idr */
	old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
	WARN_ON(old);
}

/*
 * Per-file open: set up the proto-context/context/vm xarrays and install
 * the default context at id 0.
 */
int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;
	int err;

	mutex_init(&file_priv->proto_context_lock);
	xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);

	/* 0 reserved for the default context */
	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);

	/* 0 reserved for invalid/unassigned ppgtt */
	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);

	pc = proto_context_create(i915, 0);
	if (IS_ERR(pc)) {
		err = PTR_ERR(pc);
		goto err;
	}

	/* The proto-context is only a template; drop it once realised */
	ctx = i915_gem_create_context(i915, pc);
	proto_context_close(i915, pc);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err;
	}

	gem_context_register(ctx, file_priv, 0);

	return 0;

err:
	xa_destroy(&file_priv->vm_xa);
	xa_destroy(&file_priv->context_xa);
	xa_destroy(&file_priv->proto_context_xa);
	mutex_destroy(&file_priv->proto_context_lock);
	return err;
}

/*
 * Per-file close: tear down all proto-contexts, close every remaining
 * context, and drop the file's VM references.
 */
void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long idx;

	xa_for_each(&file_priv->proto_context_xa, idx, pc)
		proto_context_close(file_priv->dev_priv, pc);
	xa_destroy(&file_priv->proto_context_xa);
	mutex_destroy(&file_priv->proto_context_lock);

	xa_for_each(&file_priv->context_xa, idx, ctx)
		context_close(ctx);
	xa_destroy(&file_priv->context_xa);

	xa_for_each(&file_priv->vm_xa, idx, vm)
		i915_vm_put(vm);
	xa_destroy(&file_priv->vm_xa);
}

/*
 * DRM_IOCTL_I915_GEM_VM_CREATE: create a fresh full-ppgtt address space and
 * return its per-file handle in args->vm_id.
 */
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_vm_control *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_ppgtt *ppgtt;
	u32 id;
	int err;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	if (args->flags)
		return -EINVAL;

	ppgtt = i915_ppgtt_create(to_gt(i915), 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (args->extensions) {
		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   NULL, 0,
					   ppgtt);
		if (err)
			goto err_put;
	}

	/* The xarray now owns the creation reference on success */
	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
		       xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_put;

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->vm_id = id;
	return 0;

err_put:
	i915_vm_put(&ppgtt->vm);
	return err;
}

/*
 * DRM_IOCTL_I915_GEM_VM_DESTROY: drop the per-file handle (and its
 * reference) on a previously created VM.
 */
int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_vm_control *args = data;
	struct i915_address_space *vm;

	if (args->flags)
		return -EINVAL;

	if (args->extensions)
		return -EINVAL;

	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
	if (!vm)
		return -ENOENT;

	i915_vm_put(vm);
	return 0;
}
/*
 * I915_CONTEXT_PARAM_VM getter: export the context's ppgtt through a new
 * per-file vm_xa handle (returned in args->value) so userspace can share it.
 */
static int get_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm;
	int err;
	u32 id;

	if (!i915_gem_context_has_full_ppgtt(ctx))
		return -ENODEV;

	vm = ctx->vm;
	GEM_BUG_ON(!vm);

	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
	if (err)
		return err;

	/* Take the reference that the new vm_xa entry now owns */
	i915_vm_get(vm);

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->value = id;
	args->size = 0;

	return err;
}

/*
 * Validate a user-supplied SSEU configuration against the device's
 * capabilities (and the gen11-specific VME restrictions), translating it
 * into the internal struct intel_sseu on success.
 */
int
i915_gem_user_to_context_sseu(struct intel_gt *gt,
			      const struct drm_i915_gem_context_param_sseu *user,
			      struct intel_sseu *context)
{
	const struct sseu_dev_info *device = &gt->info.sseu;
	struct drm_i915_private *i915 = gt->i915;
	unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0);

	/* No zeros in any field. */
	if (!user->slice_mask || !user->subslice_mask ||
	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
		return -EINVAL;

	/* Max > min. */
	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
		return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
	 */
	if (overflows_type(user->slice_mask, context->slice_mask) ||
	    overflows_type(user->subslice_mask, context->subslice_mask) ||
	    overflows_type(user->min_eus_per_subslice,
			   context->min_eus_per_subslice) ||
	    overflows_type(user->max_eus_per_subslice,
			   context->max_eus_per_subslice))
		return -EINVAL;

	/* Check validity against hardware. */
	if (user->slice_mask & ~device->slice_mask)
		return -EINVAL;

	if (user->subslice_mask & ~dev_subslice_mask)
		return -EINVAL;

	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
		return -EINVAL;

	context->slice_mask = user->slice_mask;
	context->subslice_mask = user->subslice_mask;
	context->min_eus_per_subslice = user->min_eus_per_subslice;
	context->max_eus_per_subslice = user->max_eus_per_subslice;

	/* Part specific restrictions. */
	if (GRAPHICS_VER(i915) == 11) {
		unsigned int hw_s = hweight8(device->slice_mask);
		unsigned int hw_ss_per_s = hweight8(dev_subslice_mask);
		unsigned int req_s = hweight8(context->slice_mask);
		unsigned int req_ss = hweight8(context->subslice_mask);

		/*
		 * Only full subslice enablement is possible if more than one
		 * slice is turned on.
		 */
		if (req_s > 1 && req_ss != hw_ss_per_s)
			return -EINVAL;

		/*
		 * If more than four (SScount bitfield limit) subslices are
		 * requested then the number has to be even.
		 */
		if (req_ss > 4 && (req_ss & 1))
			return -EINVAL;

		/*
		 * If only one slice is enabled and subslice count is below the
		 * device full enablement, it must be at most half of the all
		 * available subslices.
		 */
		if (req_s == 1 && req_ss < hw_ss_per_s &&
		    req_ss > (hw_ss_per_s / 2))
			return -EINVAL;

		/* ABI restriction - VME use case only. */

		/* All slices or one slice only. */
		if (req_s != 1 && req_s != hw_s)
			return -EINVAL;

		/*
		 * Half subslices or full enablement only when one slice is
		 * enabled.
		 */
		if (req_s == 1 &&
		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
			return -EINVAL;

		/* No EU configuration changes. */
		if ((user->min_eus_per_subslice !=
		     device->max_eus_per_subslice) ||
		    (user->max_eus_per_subslice !=
		     device->max_eus_per_subslice))
			return -EINVAL;
	}

	return 0;
}

/*
 * I915_CONTEXT_PARAM_SSEU setter: copy the user's SSEU request, resolve the
 * targeted engine, validate the configuration and apply it to the context.
 * Only supported on gen11 render engines.
 */
static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	struct intel_sseu sseu;
	unsigned long lookup;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (GRAPHICS_VER(i915) != 11)
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Only render engine supports RPCS configuration. */
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}

/* I915_CONTEXT_PARAM_PERSISTENCE setter: thin wrapper with size check. */
static int
set_persistence(struct i915_gem_context *ctx,
		const struct drm_i915_gem_context_param *args)
{
	if (args->size)
		return -EINVAL;

	return __context_set_persistence(ctx, args->value);
}

static int set_priority(struct i915_gem_context *ctx,
			const struct drm_i915_gem_context_param *args)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err;

	err = validate_priority(ctx->i915, args);
	if (err)
		return err;
2052aaa5957cSJason Ekstrand ctx->sched.priority = args->value; 2053b9709057SDaniel Vetter 2054b9709057SDaniel Vetter for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 2055b9709057SDaniel Vetter if (!intel_engine_has_timeslices(ce->engine)) 2056b9709057SDaniel Vetter continue; 2057b9709057SDaniel Vetter 2058b9709057SDaniel Vetter if (ctx->sched.priority >= I915_PRIORITY_NORMAL && 2059b9709057SDaniel Vetter intel_engine_has_semaphores(ce->engine)) 2060b9709057SDaniel Vetter intel_context_set_use_semaphores(ce); 2061b9709057SDaniel Vetter else 2062b9709057SDaniel Vetter intel_context_clear_use_semaphores(ce); 2063b9709057SDaniel Vetter } 2064b9709057SDaniel Vetter i915_gem_context_unlock_engines(ctx); 20650f100b70SChris Wilson 20660f100b70SChris Wilson return 0; 20670f100b70SChris Wilson } 20680f100b70SChris Wilson 2069d3ac8d42SDaniele Ceraolo Spurio static int get_protected(struct i915_gem_context *ctx, 2070d3ac8d42SDaniele Ceraolo Spurio struct drm_i915_gem_context_param *args) 2071d3ac8d42SDaniele Ceraolo Spurio { 2072d3ac8d42SDaniele Ceraolo Spurio args->size = 0; 2073d3ac8d42SDaniele Ceraolo Spurio args->value = i915_gem_context_uses_protected_content(ctx); 2074d3ac8d42SDaniele Ceraolo Spurio 2075d3ac8d42SDaniele Ceraolo Spurio return 0; 2076d3ac8d42SDaniele Ceraolo Spurio } 2077d3ac8d42SDaniele Ceraolo Spurio 207810be98a7SChris Wilson static int ctx_setparam(struct drm_i915_file_private *fpriv, 207910be98a7SChris Wilson struct i915_gem_context *ctx, 208010be98a7SChris Wilson struct drm_i915_gem_context_param *args) 208110be98a7SChris Wilson { 208210be98a7SChris Wilson int ret = 0; 208310be98a7SChris Wilson 208410be98a7SChris Wilson switch (args->param) { 208510be98a7SChris Wilson case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 208610be98a7SChris Wilson if (args->size) 208710be98a7SChris Wilson ret = -EINVAL; 208810be98a7SChris Wilson else if (args->value) 208910be98a7SChris Wilson i915_gem_context_set_no_error_capture(ctx); 209010be98a7SChris Wilson 
else 209110be98a7SChris Wilson i915_gem_context_clear_no_error_capture(ctx); 209210be98a7SChris Wilson break; 209310be98a7SChris Wilson 209410be98a7SChris Wilson case I915_CONTEXT_PARAM_BANNABLE: 209510be98a7SChris Wilson if (args->size) 209610be98a7SChris Wilson ret = -EINVAL; 209710be98a7SChris Wilson else if (!capable(CAP_SYS_ADMIN) && !args->value) 209810be98a7SChris Wilson ret = -EPERM; 209910be98a7SChris Wilson else if (args->value) 210010be98a7SChris Wilson i915_gem_context_set_bannable(ctx); 2101d3ac8d42SDaniele Ceraolo Spurio else if (i915_gem_context_uses_protected_content(ctx)) 2102d3ac8d42SDaniele Ceraolo Spurio ret = -EPERM; /* can't clear this for protected contexts */ 210310be98a7SChris Wilson else 210410be98a7SChris Wilson i915_gem_context_clear_bannable(ctx); 210510be98a7SChris Wilson break; 210610be98a7SChris Wilson 210710be98a7SChris Wilson case I915_CONTEXT_PARAM_RECOVERABLE: 210810be98a7SChris Wilson if (args->size) 210910be98a7SChris Wilson ret = -EINVAL; 2110d3ac8d42SDaniele Ceraolo Spurio else if (!args->value) 211110be98a7SChris Wilson i915_gem_context_clear_recoverable(ctx); 2112d3ac8d42SDaniele Ceraolo Spurio else if (i915_gem_context_uses_protected_content(ctx)) 2113d3ac8d42SDaniele Ceraolo Spurio ret = -EPERM; /* can't set this for protected contexts */ 2114d3ac8d42SDaniele Ceraolo Spurio else 2115d3ac8d42SDaniele Ceraolo Spurio i915_gem_context_set_recoverable(ctx); 211610be98a7SChris Wilson break; 211710be98a7SChris Wilson 211810be98a7SChris Wilson case I915_CONTEXT_PARAM_PRIORITY: 21190f100b70SChris Wilson ret = set_priority(ctx, args); 212010be98a7SChris Wilson break; 212110be98a7SChris Wilson 212210be98a7SChris Wilson case I915_CONTEXT_PARAM_SSEU: 212310be98a7SChris Wilson ret = set_sseu(ctx, args); 212410be98a7SChris Wilson break; 212510be98a7SChris Wilson 2126a0e04715SChris Wilson case I915_CONTEXT_PARAM_PERSISTENCE: 2127a0e04715SChris Wilson ret = set_persistence(ctx, args); 2128a0e04715SChris Wilson break; 2129a0e04715SChris 
Wilson 2130d3ac8d42SDaniele Ceraolo Spurio case I915_CONTEXT_PARAM_PROTECTED_CONTENT: 21316ff6d61dSJason Ekstrand case I915_CONTEXT_PARAM_NO_ZEROMAP: 213210be98a7SChris Wilson case I915_CONTEXT_PARAM_BAN_PERIOD: 2133fe4751c3SJason Ekstrand case I915_CONTEXT_PARAM_RINGSIZE: 2134ccbc1b97SJason Ekstrand case I915_CONTEXT_PARAM_VM: 2135d9d29c74SJason Ekstrand case I915_CONTEXT_PARAM_ENGINES: 213610be98a7SChris Wilson default: 213710be98a7SChris Wilson ret = -EINVAL; 213810be98a7SChris Wilson break; 213910be98a7SChris Wilson } 214010be98a7SChris Wilson 214110be98a7SChris Wilson return ret; 214210be98a7SChris Wilson } 214310be98a7SChris Wilson 214410be98a7SChris Wilson struct create_ext { 2145d4433c76SJason Ekstrand struct i915_gem_proto_context *pc; 214610be98a7SChris Wilson struct drm_i915_file_private *fpriv; 214710be98a7SChris Wilson }; 214810be98a7SChris Wilson 214910be98a7SChris Wilson static int create_setparam(struct i915_user_extension __user *ext, void *data) 215010be98a7SChris Wilson { 215110be98a7SChris Wilson struct drm_i915_gem_context_create_ext_setparam local; 215210be98a7SChris Wilson const struct create_ext *arg = data; 215310be98a7SChris Wilson 215410be98a7SChris Wilson if (copy_from_user(&local, ext, sizeof(local))) 215510be98a7SChris Wilson return -EFAULT; 215610be98a7SChris Wilson 215710be98a7SChris Wilson if (local.param.ctx_id) 215810be98a7SChris Wilson return -EINVAL; 215910be98a7SChris Wilson 2160d4433c76SJason Ekstrand return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param); 216110be98a7SChris Wilson } 216210be98a7SChris Wilson 21634a766ae4SJason Ekstrand static int invalid_ext(struct i915_user_extension __user *ext, void *data) 216410be98a7SChris Wilson { 216510be98a7SChris Wilson return -EINVAL; 216610be98a7SChris Wilson } 216710be98a7SChris Wilson 216810be98a7SChris Wilson static const i915_user_extension_fn create_extensions[] = { 216910be98a7SChris Wilson [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, 21704a766ae4SJason 
Ekstrand [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext, 217110be98a7SChris Wilson }; 217210be98a7SChris Wilson 217310be98a7SChris Wilson static bool client_is_banned(struct drm_i915_file_private *file_priv) 217410be98a7SChris Wilson { 217510be98a7SChris Wilson return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; 217610be98a7SChris Wilson } 217710be98a7SChris Wilson 2178a4c1cdd3SJason Ekstrand static inline struct i915_gem_context * 2179a4c1cdd3SJason Ekstrand __context_lookup(struct drm_i915_file_private *file_priv, u32 id) 2180a4c1cdd3SJason Ekstrand { 2181a4c1cdd3SJason Ekstrand struct i915_gem_context *ctx; 2182a4c1cdd3SJason Ekstrand 2183a4c1cdd3SJason Ekstrand rcu_read_lock(); 2184a4c1cdd3SJason Ekstrand ctx = xa_load(&file_priv->context_xa, id); 2185a4c1cdd3SJason Ekstrand if (ctx && !kref_get_unless_zero(&ctx->ref)) 2186a4c1cdd3SJason Ekstrand ctx = NULL; 2187a4c1cdd3SJason Ekstrand rcu_read_unlock(); 2188a4c1cdd3SJason Ekstrand 2189a4c1cdd3SJason Ekstrand return ctx; 2190a4c1cdd3SJason Ekstrand } 2191a4c1cdd3SJason Ekstrand 2192a4c1cdd3SJason Ekstrand static struct i915_gem_context * 2193a4c1cdd3SJason Ekstrand finalize_create_context_locked(struct drm_i915_file_private *file_priv, 2194a4c1cdd3SJason Ekstrand struct i915_gem_proto_context *pc, u32 id) 2195a4c1cdd3SJason Ekstrand { 2196a4c1cdd3SJason Ekstrand struct i915_gem_context *ctx; 2197a4c1cdd3SJason Ekstrand void *old; 2198a4c1cdd3SJason Ekstrand 2199a4c1cdd3SJason Ekstrand lockdep_assert_held(&file_priv->proto_context_lock); 2200a4c1cdd3SJason Ekstrand 2201a4c1cdd3SJason Ekstrand ctx = i915_gem_create_context(file_priv->dev_priv, pc); 2202a4c1cdd3SJason Ekstrand if (IS_ERR(ctx)) 2203a4c1cdd3SJason Ekstrand return ctx; 2204a4c1cdd3SJason Ekstrand 2205bed4b455SRob Clark /* 2206bed4b455SRob Clark * One for the xarray and one for the caller. 
We need to grab 2207bed4b455SRob Clark * the reference *prior* to making the ctx visble to userspace 2208bed4b455SRob Clark * in gem_context_register(), as at any point after that 2209bed4b455SRob Clark * userspace can try to race us with another thread destroying 2210bed4b455SRob Clark * the context under our feet. 2211bed4b455SRob Clark */ 2212bed4b455SRob Clark i915_gem_context_get(ctx); 2213bed4b455SRob Clark 2214a4c1cdd3SJason Ekstrand gem_context_register(ctx, file_priv, id); 2215a4c1cdd3SJason Ekstrand 2216a4c1cdd3SJason Ekstrand old = xa_erase(&file_priv->proto_context_xa, id); 2217a4c1cdd3SJason Ekstrand GEM_BUG_ON(old != pc); 2218d3ac8d42SDaniele Ceraolo Spurio proto_context_close(file_priv->dev_priv, pc); 2219a4c1cdd3SJason Ekstrand 2220bed4b455SRob Clark return ctx; 2221a4c1cdd3SJason Ekstrand } 2222a4c1cdd3SJason Ekstrand 2223a4c1cdd3SJason Ekstrand struct i915_gem_context * 2224a4c1cdd3SJason Ekstrand i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) 2225a4c1cdd3SJason Ekstrand { 2226a4c1cdd3SJason Ekstrand struct i915_gem_proto_context *pc; 2227a4c1cdd3SJason Ekstrand struct i915_gem_context *ctx; 2228a4c1cdd3SJason Ekstrand 2229a4c1cdd3SJason Ekstrand ctx = __context_lookup(file_priv, id); 2230a4c1cdd3SJason Ekstrand if (ctx) 2231a4c1cdd3SJason Ekstrand return ctx; 2232a4c1cdd3SJason Ekstrand 2233a4c1cdd3SJason Ekstrand mutex_lock(&file_priv->proto_context_lock); 2234a4c1cdd3SJason Ekstrand /* Try one more time under the lock */ 2235a4c1cdd3SJason Ekstrand ctx = __context_lookup(file_priv, id); 2236a4c1cdd3SJason Ekstrand if (!ctx) { 2237a4c1cdd3SJason Ekstrand pc = xa_load(&file_priv->proto_context_xa, id); 2238a4c1cdd3SJason Ekstrand if (!pc) 2239a4c1cdd3SJason Ekstrand ctx = ERR_PTR(-ENOENT); 2240a4c1cdd3SJason Ekstrand else 2241a4c1cdd3SJason Ekstrand ctx = finalize_create_context_locked(file_priv, pc, id); 2242a4c1cdd3SJason Ekstrand } 2243a4c1cdd3SJason Ekstrand mutex_unlock(&file_priv->proto_context_lock); 
2244a4c1cdd3SJason Ekstrand 2245a4c1cdd3SJason Ekstrand return ctx; 2246a4c1cdd3SJason Ekstrand } 2247a4c1cdd3SJason Ekstrand 224810be98a7SChris Wilson int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 224910be98a7SChris Wilson struct drm_file *file) 225010be98a7SChris Wilson { 225110be98a7SChris Wilson struct drm_i915_private *i915 = to_i915(dev); 225210be98a7SChris Wilson struct drm_i915_gem_context_create_ext *args = data; 225310be98a7SChris Wilson struct create_ext ext_data; 225410be98a7SChris Wilson int ret; 2255c100777cSTvrtko Ursulin u32 id; 225610be98a7SChris Wilson 225710be98a7SChris Wilson if (!DRIVER_CAPS(i915)->has_logical_contexts) 225810be98a7SChris Wilson return -ENODEV; 225910be98a7SChris Wilson 226010be98a7SChris Wilson if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) 226110be98a7SChris Wilson return -EINVAL; 226210be98a7SChris Wilson 22631a9c4db4SMichał Winiarski ret = intel_gt_terminally_wedged(to_gt(i915)); 226410be98a7SChris Wilson if (ret) 226510be98a7SChris Wilson return ret; 226610be98a7SChris Wilson 226710be98a7SChris Wilson ext_data.fpriv = file->driver_priv; 226810be98a7SChris Wilson if (client_is_banned(ext_data.fpriv)) { 2269baa89ba3SWambui Karuga drm_dbg(&i915->drm, 2270baa89ba3SWambui Karuga "client %s[%d] banned from creating ctx\n", 2271ba16a48aSTvrtko Ursulin current->comm, task_pid_nr(current)); 227210be98a7SChris Wilson return -EIO; 227310be98a7SChris Wilson } 227410be98a7SChris Wilson 2275d4433c76SJason Ekstrand ext_data.pc = proto_context_create(i915, args->flags); 2276d4433c76SJason Ekstrand if (IS_ERR(ext_data.pc)) 2277d4433c76SJason Ekstrand return PTR_ERR(ext_data.pc); 227810be98a7SChris Wilson 227910be98a7SChris Wilson if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { 228010be98a7SChris Wilson ret = i915_user_extensions(u64_to_user_ptr(args->extensions), 228110be98a7SChris Wilson create_extensions, 228210be98a7SChris Wilson ARRAY_SIZE(create_extensions), 228310be98a7SChris Wilson 
&ext_data); 2284a4c1cdd3SJason Ekstrand if (ret) 2285a4c1cdd3SJason Ekstrand goto err_pc; 228610be98a7SChris Wilson } 228710be98a7SChris Wilson 2288ca06f936SJason Ekstrand if (GRAPHICS_VER(i915) > 12) { 2289ca06f936SJason Ekstrand struct i915_gem_context *ctx; 2290ca06f936SJason Ekstrand 2291ca06f936SJason Ekstrand /* Get ourselves a context ID */ 2292ca06f936SJason Ekstrand ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL, 2293ca06f936SJason Ekstrand xa_limit_32b, GFP_KERNEL); 2294ca06f936SJason Ekstrand if (ret) 2295ca06f936SJason Ekstrand goto err_pc; 2296ca06f936SJason Ekstrand 2297ca06f936SJason Ekstrand ctx = i915_gem_create_context(i915, ext_data.pc); 2298ca06f936SJason Ekstrand if (IS_ERR(ctx)) { 2299ca06f936SJason Ekstrand ret = PTR_ERR(ctx); 2300ca06f936SJason Ekstrand goto err_pc; 2301ca06f936SJason Ekstrand } 2302ca06f936SJason Ekstrand 2303d3ac8d42SDaniele Ceraolo Spurio proto_context_close(i915, ext_data.pc); 2304ca06f936SJason Ekstrand gem_context_register(ctx, ext_data.fpriv, id); 2305ca06f936SJason Ekstrand } else { 2306a4c1cdd3SJason Ekstrand ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id); 230710be98a7SChris Wilson if (ret < 0) 2308a4c1cdd3SJason Ekstrand goto err_pc; 2309ca06f936SJason Ekstrand } 231010be98a7SChris Wilson 2311c100777cSTvrtko Ursulin args->ctx_id = id; 231210be98a7SChris Wilson 231310be98a7SChris Wilson return 0; 231410be98a7SChris Wilson 2315a4c1cdd3SJason Ekstrand err_pc: 2316d3ac8d42SDaniele Ceraolo Spurio proto_context_close(i915, ext_data.pc); 231710be98a7SChris Wilson return ret; 231810be98a7SChris Wilson } 231910be98a7SChris Wilson 232010be98a7SChris Wilson int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 232110be98a7SChris Wilson struct drm_file *file) 232210be98a7SChris Wilson { 232310be98a7SChris Wilson struct drm_i915_gem_context_destroy *args = data; 232410be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 2325a4c1cdd3SJason Ekstrand struct 
i915_gem_proto_context *pc; 232610be98a7SChris Wilson struct i915_gem_context *ctx; 232710be98a7SChris Wilson 232810be98a7SChris Wilson if (args->pad != 0) 232910be98a7SChris Wilson return -EINVAL; 233010be98a7SChris Wilson 233110be98a7SChris Wilson if (!args->ctx_id) 233210be98a7SChris Wilson return -ENOENT; 233310be98a7SChris Wilson 2334a4c1cdd3SJason Ekstrand /* We need to hold the proto-context lock here to prevent races 2335a4c1cdd3SJason Ekstrand * with finalize_create_context_locked(). 2336a4c1cdd3SJason Ekstrand */ 2337a4c1cdd3SJason Ekstrand mutex_lock(&file_priv->proto_context_lock); 2338c100777cSTvrtko Ursulin ctx = xa_erase(&file_priv->context_xa, args->ctx_id); 2339a4c1cdd3SJason Ekstrand pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id); 2340a4c1cdd3SJason Ekstrand mutex_unlock(&file_priv->proto_context_lock); 234110be98a7SChris Wilson 2342a4c1cdd3SJason Ekstrand if (!ctx && !pc) 2343a4c1cdd3SJason Ekstrand return -ENOENT; 2344a4c1cdd3SJason Ekstrand GEM_WARN_ON(ctx && pc); 2345a4c1cdd3SJason Ekstrand 2346a4c1cdd3SJason Ekstrand if (pc) 2347d3ac8d42SDaniele Ceraolo Spurio proto_context_close(file_priv->dev_priv, pc); 2348a4c1cdd3SJason Ekstrand 2349a4c1cdd3SJason Ekstrand if (ctx) 235010be98a7SChris Wilson context_close(ctx); 2351a4c1cdd3SJason Ekstrand 235210be98a7SChris Wilson return 0; 235310be98a7SChris Wilson } 235410be98a7SChris Wilson 235510be98a7SChris Wilson static int get_sseu(struct i915_gem_context *ctx, 235610be98a7SChris Wilson struct drm_i915_gem_context_param *args) 235710be98a7SChris Wilson { 235810be98a7SChris Wilson struct drm_i915_gem_context_param_sseu user_sseu; 235910be98a7SChris Wilson struct intel_context *ce; 236010be98a7SChris Wilson unsigned long lookup; 236110be98a7SChris Wilson int err; 236210be98a7SChris Wilson 236310be98a7SChris Wilson if (args->size == 0) 236410be98a7SChris Wilson goto out; 236510be98a7SChris Wilson else if (args->size < sizeof(user_sseu)) 236610be98a7SChris Wilson return -EINVAL; 
236710be98a7SChris Wilson 236810be98a7SChris Wilson if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 236910be98a7SChris Wilson sizeof(user_sseu))) 237010be98a7SChris Wilson return -EFAULT; 237110be98a7SChris Wilson 237210be98a7SChris Wilson if (user_sseu.rsvd) 237310be98a7SChris Wilson return -EINVAL; 237410be98a7SChris Wilson 237510be98a7SChris Wilson if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 237610be98a7SChris Wilson return -EINVAL; 237710be98a7SChris Wilson 237810be98a7SChris Wilson lookup = 0; 237910be98a7SChris Wilson if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 238010be98a7SChris Wilson lookup |= LOOKUP_USER_INDEX; 238110be98a7SChris Wilson 238210be98a7SChris Wilson ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 238310be98a7SChris Wilson if (IS_ERR(ce)) 238410be98a7SChris Wilson return PTR_ERR(ce); 238510be98a7SChris Wilson 238610be98a7SChris Wilson err = intel_context_lock_pinned(ce); /* serialises with set_sseu */ 238710be98a7SChris Wilson if (err) { 238810be98a7SChris Wilson intel_context_put(ce); 238910be98a7SChris Wilson return err; 239010be98a7SChris Wilson } 239110be98a7SChris Wilson 239210be98a7SChris Wilson user_sseu.slice_mask = ce->sseu.slice_mask; 239310be98a7SChris Wilson user_sseu.subslice_mask = ce->sseu.subslice_mask; 239410be98a7SChris Wilson user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; 239510be98a7SChris Wilson user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; 239610be98a7SChris Wilson 239710be98a7SChris Wilson intel_context_unlock_pinned(ce); 239810be98a7SChris Wilson intel_context_put(ce); 239910be98a7SChris Wilson 240010be98a7SChris Wilson if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, 240110be98a7SChris Wilson sizeof(user_sseu))) 240210be98a7SChris Wilson return -EFAULT; 240310be98a7SChris Wilson 240410be98a7SChris Wilson out: 240510be98a7SChris Wilson args->size = sizeof(user_sseu); 240610be98a7SChris Wilson 240710be98a7SChris Wilson 
return 0; 240810be98a7SChris Wilson } 240910be98a7SChris Wilson 241010be98a7SChris Wilson int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 241110be98a7SChris Wilson struct drm_file *file) 241210be98a7SChris Wilson { 241310be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 241410be98a7SChris Wilson struct drm_i915_gem_context_param *args = data; 241510be98a7SChris Wilson struct i915_gem_context *ctx; 241624fad29eSDaniel Vetter struct i915_address_space *vm; 241710be98a7SChris Wilson int ret = 0; 241810be98a7SChris Wilson 241910be98a7SChris Wilson ctx = i915_gem_context_lookup(file_priv, args->ctx_id); 2420046d1660SJason Ekstrand if (IS_ERR(ctx)) 2421046d1660SJason Ekstrand return PTR_ERR(ctx); 242210be98a7SChris Wilson 242310be98a7SChris Wilson switch (args->param) { 242410be98a7SChris Wilson case I915_CONTEXT_PARAM_GTT_SIZE: 242510be98a7SChris Wilson args->size = 0; 242624fad29eSDaniel Vetter vm = i915_gem_context_get_eb_vm(ctx); 242724fad29eSDaniel Vetter args->value = vm->total; 242824fad29eSDaniel Vetter i915_vm_put(vm); 242924fad29eSDaniel Vetter 243010be98a7SChris Wilson break; 243110be98a7SChris Wilson 243210be98a7SChris Wilson case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 243310be98a7SChris Wilson args->size = 0; 243410be98a7SChris Wilson args->value = i915_gem_context_no_error_capture(ctx); 243510be98a7SChris Wilson break; 243610be98a7SChris Wilson 243710be98a7SChris Wilson case I915_CONTEXT_PARAM_BANNABLE: 243810be98a7SChris Wilson args->size = 0; 243910be98a7SChris Wilson args->value = i915_gem_context_is_bannable(ctx); 244010be98a7SChris Wilson break; 244110be98a7SChris Wilson 244210be98a7SChris Wilson case I915_CONTEXT_PARAM_RECOVERABLE: 244310be98a7SChris Wilson args->size = 0; 244410be98a7SChris Wilson args->value = i915_gem_context_is_recoverable(ctx); 244510be98a7SChris Wilson break; 244610be98a7SChris Wilson 244710be98a7SChris Wilson case I915_CONTEXT_PARAM_PRIORITY: 244810be98a7SChris Wilson 
args->size = 0; 2449eb5c10cbSChris Wilson args->value = ctx->sched.priority; 245010be98a7SChris Wilson break; 245110be98a7SChris Wilson 245210be98a7SChris Wilson case I915_CONTEXT_PARAM_SSEU: 245310be98a7SChris Wilson ret = get_sseu(ctx, args); 245410be98a7SChris Wilson break; 245510be98a7SChris Wilson 245610be98a7SChris Wilson case I915_CONTEXT_PARAM_VM: 245710be98a7SChris Wilson ret = get_ppgtt(file_priv, ctx, args); 245810be98a7SChris Wilson break; 245910be98a7SChris Wilson 2460a0e04715SChris Wilson case I915_CONTEXT_PARAM_PERSISTENCE: 2461a0e04715SChris Wilson args->size = 0; 2462a0e04715SChris Wilson args->value = i915_gem_context_is_persistent(ctx); 2463a0e04715SChris Wilson break; 2464a0e04715SChris Wilson 2465d3ac8d42SDaniele Ceraolo Spurio case I915_CONTEXT_PARAM_PROTECTED_CONTENT: 2466d3ac8d42SDaniele Ceraolo Spurio ret = get_protected(ctx, args); 2467d3ac8d42SDaniele Ceraolo Spurio break; 2468d3ac8d42SDaniele Ceraolo Spurio 24696ff6d61dSJason Ekstrand case I915_CONTEXT_PARAM_NO_ZEROMAP: 247010be98a7SChris Wilson case I915_CONTEXT_PARAM_BAN_PERIOD: 2471c7a71fc8SJason Ekstrand case I915_CONTEXT_PARAM_ENGINES: 2472fe4751c3SJason Ekstrand case I915_CONTEXT_PARAM_RINGSIZE: 247310be98a7SChris Wilson default: 247410be98a7SChris Wilson ret = -EINVAL; 247510be98a7SChris Wilson break; 247610be98a7SChris Wilson } 247710be98a7SChris Wilson 247810be98a7SChris Wilson i915_gem_context_put(ctx); 247910be98a7SChris Wilson return ret; 248010be98a7SChris Wilson } 248110be98a7SChris Wilson 248210be98a7SChris Wilson int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 248310be98a7SChris Wilson struct drm_file *file) 248410be98a7SChris Wilson { 248510be98a7SChris Wilson struct drm_i915_file_private *file_priv = file->driver_priv; 248610be98a7SChris Wilson struct drm_i915_gem_context_param *args = data; 2487a4c1cdd3SJason Ekstrand struct i915_gem_proto_context *pc; 248810be98a7SChris Wilson struct i915_gem_context *ctx; 2489a4c1cdd3SJason Ekstrand int ret = 
0; 249010be98a7SChris Wilson 2491a4c1cdd3SJason Ekstrand mutex_lock(&file_priv->proto_context_lock); 2492a4c1cdd3SJason Ekstrand ctx = __context_lookup(file_priv, args->ctx_id); 2493a4c1cdd3SJason Ekstrand if (!ctx) { 2494a4c1cdd3SJason Ekstrand pc = xa_load(&file_priv->proto_context_xa, args->ctx_id); 2495ca06f936SJason Ekstrand if (pc) { 2496ca06f936SJason Ekstrand /* Contexts should be finalized inside 2497ca06f936SJason Ekstrand * GEM_CONTEXT_CREATE starting with graphics 2498ca06f936SJason Ekstrand * version 13. 2499ca06f936SJason Ekstrand */ 2500ca06f936SJason Ekstrand WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12); 2501a4c1cdd3SJason Ekstrand ret = set_proto_ctx_param(file_priv, pc, args); 2502ca06f936SJason Ekstrand } else { 2503a4c1cdd3SJason Ekstrand ret = -ENOENT; 2504a4c1cdd3SJason Ekstrand } 2505ca06f936SJason Ekstrand } 2506a4c1cdd3SJason Ekstrand mutex_unlock(&file_priv->proto_context_lock); 250710be98a7SChris Wilson 2508a4c1cdd3SJason Ekstrand if (ctx) { 250910be98a7SChris Wilson ret = ctx_setparam(file_priv, ctx, args); 251010be98a7SChris Wilson i915_gem_context_put(ctx); 2511a4c1cdd3SJason Ekstrand } 2512a4c1cdd3SJason Ekstrand 251310be98a7SChris Wilson return ret; 251410be98a7SChris Wilson } 251510be98a7SChris Wilson 251610be98a7SChris Wilson int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, 251710be98a7SChris Wilson void *data, struct drm_file *file) 251810be98a7SChris Wilson { 2519a4e7ccdaSChris Wilson struct drm_i915_private *i915 = to_i915(dev); 252010be98a7SChris Wilson struct drm_i915_reset_stats *args = data; 252110be98a7SChris Wilson struct i915_gem_context *ctx; 252210be98a7SChris Wilson 252310be98a7SChris Wilson if (args->flags || args->pad) 252410be98a7SChris Wilson return -EINVAL; 252510be98a7SChris Wilson 2526a4839cb1SJason Ekstrand ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id); 2527046d1660SJason Ekstrand if (IS_ERR(ctx)) 2528046d1660SJason Ekstrand return PTR_ERR(ctx); 252910be98a7SChris Wilson 
253010be98a7SChris Wilson /* 253110be98a7SChris Wilson * We opt for unserialised reads here. This may result in tearing 253210be98a7SChris Wilson * in the extremely unlikely event of a GPU hang on this context 253310be98a7SChris Wilson * as we are querying them. If we need that extra layer of protection, 253410be98a7SChris Wilson * we should wrap the hangstats with a seqlock. 253510be98a7SChris Wilson */ 253610be98a7SChris Wilson 253710be98a7SChris Wilson if (capable(CAP_SYS_ADMIN)) 2538a4e7ccdaSChris Wilson args->reset_count = i915_reset_count(&i915->gpu_error); 253910be98a7SChris Wilson else 254010be98a7SChris Wilson args->reset_count = 0; 254110be98a7SChris Wilson 254210be98a7SChris Wilson args->batch_active = atomic_read(&ctx->guilty_count); 254310be98a7SChris Wilson args->batch_pending = atomic_read(&ctx->active_count); 254410be98a7SChris Wilson 2545a4839cb1SJason Ekstrand i915_gem_context_put(ctx); 2546a4839cb1SJason Ekstrand return 0; 254710be98a7SChris Wilson } 254810be98a7SChris Wilson 254910be98a7SChris Wilson /* GEM context-engines iterator: for_each_gem_engine() */ 255010be98a7SChris Wilson struct intel_context * 255110be98a7SChris Wilson i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) 255210be98a7SChris Wilson { 255310be98a7SChris Wilson const struct i915_gem_engines *e = it->engines; 255410be98a7SChris Wilson struct intel_context *ctx; 255510be98a7SChris Wilson 2556130a95e9SChris Wilson if (unlikely(!e)) 2557130a95e9SChris Wilson return NULL; 2558130a95e9SChris Wilson 255910be98a7SChris Wilson do { 256010be98a7SChris Wilson if (it->idx >= e->num_engines) 256110be98a7SChris Wilson return NULL; 256210be98a7SChris Wilson 256310be98a7SChris Wilson ctx = e->engines[it->idx++]; 256410be98a7SChris Wilson } while (!ctx); 256510be98a7SChris Wilson 256610be98a7SChris Wilson return ctx; 256710be98a7SChris Wilson } 256810be98a7SChris Wilson 256910be98a7SChris Wilson #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 257010be98a7SChris Wilson #include 
"selftests/mock_context.c" 257110be98a7SChris Wilson #include "selftests/i915_gem_context.c" 257210be98a7SChris Wilson #endif 257310be98a7SChris Wilson 2574a6270d1dSDaniel Vetter void i915_gem_context_module_exit(void) 257510be98a7SChris Wilson { 2576a6270d1dSDaniel Vetter kmem_cache_destroy(slab_luts); 257710be98a7SChris Wilson } 257810be98a7SChris Wilson 2579a6270d1dSDaniel Vetter int __init i915_gem_context_module_init(void) 258010be98a7SChris Wilson { 2581a6270d1dSDaniel Vetter slab_luts = KMEM_CACHE(i915_lut_handle, 0); 2582a6270d1dSDaniel Vetter if (!slab_luts) 258310be98a7SChris Wilson return -ENOMEM; 258410be98a7SChris Wilson 258510be98a7SChris Wilson return 0; 258610be98a7SChris Wilson } 2587