1 /* 2 * SPDX-License-Identifier: MIT 3 * 4 * Copyright © 2011-2012 Intel Corporation 5 */ 6 7 /* 8 * This file implements HW context support. On gen5+ a HW context consists of an 9 * opaque GPU object which is referenced at times of context saves and restores. 10 * With RC6 enabled, the context is also referenced as the GPU enters and exists 11 * from RC6 (GPU has it's own internal power context, except on gen5). Though 12 * something like a context does exist for the media ring, the code only 13 * supports contexts for the render ring. 14 * 15 * In software, there is a distinction between contexts created by the user, 16 * and the default HW context. The default HW context is used by GPU clients 17 * that do not request setup of their own hardware context. The default 18 * context's state is never restored to help prevent programming errors. This 19 * would happen if a client ran and piggy-backed off another clients GPU state. 20 * The default context only exists to give the GPU some offset to load as the 21 * current to invoke a save of the context we actually care about. In fact, the 22 * code could likely be constructed, albeit in a more complicated fashion, to 23 * never use the default context, though that limits the driver's ability to 24 * swap out, and/or destroy other contexts. 25 * 26 * All other contexts are created as a request by the GPU client. These contexts 27 * store GPU state, and thus allow GPU clients to not re-emit state (and 28 * potentially query certain state) at any time. The kernel driver makes 29 * certain that the appropriate commands are inserted. 30 * 31 * The context life cycle is semi-complicated in that context BOs may live 32 * longer than the context itself because of the way the hardware, and object 33 * tracking works. Below is a very crude representation of the state machine 34 * describing the context life. 35 * refcount pincount active 36 * S0: initial state 0 0 0 37 * S1: context created 1 0 0 38 * S2: context is currently running 2 1 X 39 * S3: GPU referenced, but not current 2 0 1 40 * S4: context is current, but destroyed 1 1 0 41 * S5: like S3, but destroyed 1 0 1 42 * 43 * The most common (but not all) transitions: 44 * S0->S1: client creates a context 45 * S1->S2: client submits execbuf with context 46 * S2->S3: other clients submits execbuf with context 47 * S3->S1: context object was retired 48 * S3->S2: clients submits another execbuf 49 * S2->S4: context destroy called with current context 50 * S3->S5->S0: destroy path 51 * S4->S5->S0: destroy path on current context 52 * 53 * There are two confusing terms used above: 54 * The "current context" means the context which is currently running on the 55 * GPU. The GPU has loaded its state already and has stored away the gtt 56 * offset of the BO. The GPU is not actively referencing the data at this 57 * offset, but it will on the next context switch. The only way to avoid this 58 * is to do a GPU reset. 59 * 60 * An "active context' is one which was previously the "current context" and is 61 * on the active list waiting for the next context switch to occur. Until this 62 * happens, the object must remain at the same gtt offset. It is therefore 63 * possible to destroy a context, but it is still active. 
64 * 65 */ 66 67 #include <linux/log2.h> 68 #include <linux/nospec.h> 69 70 #include <drm/drm_cache.h> 71 #include <drm/drm_syncobj.h> 72 73 #include "gt/gen6_ppgtt.h" 74 #include "gt/intel_context.h" 75 #include "gt/intel_context_param.h" 76 #include "gt/intel_engine_heartbeat.h" 77 #include "gt/intel_engine_user.h" 78 #include "gt/intel_gpu_commands.h" 79 #include "gt/intel_ring.h" 80 81 #include "pxp/intel_pxp.h" 82 83 #include "i915_file_private.h" 84 #include "i915_gem_context.h" 85 #include "i915_trace.h" 86 #include "i915_user_extensions.h" 87 88 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 89 90 static struct kmem_cache *slab_luts; 91 92 struct i915_lut_handle *i915_lut_handle_alloc(void) 93 { 94 return kmem_cache_alloc(slab_luts, GFP_KERNEL); 95 } 96 97 void i915_lut_handle_free(struct i915_lut_handle *lut) 98 { 99 return kmem_cache_free(slab_luts, lut); 100 } 101 102 static void lut_close(struct i915_gem_context *ctx) 103 { 104 struct radix_tree_iter iter; 105 void __rcu **slot; 106 107 mutex_lock(&ctx->lut_mutex); 108 rcu_read_lock(); 109 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { 110 struct i915_vma *vma = rcu_dereference_raw(*slot); 111 struct drm_i915_gem_object *obj = vma->obj; 112 struct i915_lut_handle *lut; 113 114 if (!kref_get_unless_zero(&obj->base.refcount)) 115 continue; 116 117 spin_lock(&obj->lut_lock); 118 list_for_each_entry(lut, &obj->lut_list, obj_link) { 119 if (lut->ctx != ctx) 120 continue; 121 122 if (lut->handle != iter.index) 123 continue; 124 125 list_del(&lut->obj_link); 126 break; 127 } 128 spin_unlock(&obj->lut_lock); 129 130 if (&lut->obj_link != &obj->lut_list) { 131 i915_lut_handle_free(lut); 132 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); 133 i915_vma_close(vma); 134 i915_gem_object_put(obj); 135 } 136 137 i915_gem_object_put(obj); 138 } 139 rcu_read_unlock(); 140 mutex_unlock(&ctx->lut_mutex); 141 } 142 143 static struct intel_context * 144 lookup_user_engine(struct i915_gem_context *ctx, 145 unsigned long flags, 146 const struct i915_engine_class_instance *ci) 147 #define LOOKUP_USER_INDEX BIT(0) 148 { 149 int idx; 150 151 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx)) 152 return ERR_PTR(-EINVAL); 153 154 if (!i915_gem_context_user_engines(ctx)) { 155 struct intel_engine_cs *engine; 156 157 engine = intel_engine_lookup_user(ctx->i915, 158 ci->engine_class, 159 ci->engine_instance); 160 if (!engine) 161 return ERR_PTR(-EINVAL); 162 163 idx = engine->legacy_idx; 164 } else { 165 idx = ci->engine_instance; 166 } 167 168 return i915_gem_context_get_engine(ctx, idx); 169 } 170 171 static int validate_priority(struct drm_i915_private *i915, 172 const struct drm_i915_gem_context_param *args) 173 { 174 s64 priority = args->value; 175 176 if (args->size) 177 return -EINVAL; 178 179 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) 180 return -ENODEV; 181 182 if (priority > I915_CONTEXT_MAX_USER_PRIORITY || 183 priority < I915_CONTEXT_MIN_USER_PRIORITY) 184 return -EINVAL; 185 186 if (priority > I915_CONTEXT_DEFAULT_PRIORITY && 187 !capable(CAP_SYS_NICE)) 188 return -EPERM; 189 190 return 0; 191 } 192 193 static void proto_context_close(struct drm_i915_private *i915, 194 struct i915_gem_proto_context *pc) 195 { 196 int i; 197 198 if (pc->pxp_wakeref) 199 intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref); 200 if (pc->vm) 201 i915_vm_put(pc->vm); 202 if (pc->user_engines) { 203 for (i = 0; i < pc->num_user_engines; i++) 204 kfree(pc->user_engines[i].siblings); 205 
kfree(pc->user_engines); 206 } 207 kfree(pc); 208 } 209 210 static int proto_context_set_persistence(struct drm_i915_private *i915, 211 struct i915_gem_proto_context *pc, 212 bool persist) 213 { 214 if (persist) { 215 /* 216 * Only contexts that are short-lived [that will expire or be 217 * reset] are allowed to survive past termination. We require 218 * hangcheck to ensure that the persistent requests are healthy. 219 */ 220 if (!i915->params.enable_hangcheck) 221 return -EINVAL; 222 223 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); 224 } else { 225 /* To cancel a context we use "preempt-to-idle" */ 226 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) 227 return -ENODEV; 228 229 /* 230 * If the cancel fails, we then need to reset, cleanly! 231 * 232 * If the per-engine reset fails, all hope is lost! We resort 233 * to a full GPU reset in that unlikely case, but realistically 234 * if the engine could not reset, the full reset does not fare 235 * much better. The damage has been done. 236 * 237 * However, if we cannot reset an engine by itself, we cannot 238 * cleanup a hanging persistent context without causing 239 * colateral damage, and we should not pretend we can by 240 * exposing the interface. 241 */ 242 if (!intel_has_reset_engine(to_gt(i915))) 243 return -ENODEV; 244 245 pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE); 246 } 247 248 return 0; 249 } 250 251 static int proto_context_set_protected(struct drm_i915_private *i915, 252 struct i915_gem_proto_context *pc, 253 bool protected) 254 { 255 int ret = 0; 256 257 if (!protected) { 258 pc->uses_protected_content = false; 259 } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) { 260 ret = -ENODEV; 261 } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) || 262 !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) { 263 ret = -EPERM; 264 } else { 265 pc->uses_protected_content = true; 266 267 /* 268 * protected context usage requires the PXP session to be up, 269 * which in turn requires the device to be active. 
270 */ 271 pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); 272 273 if (!intel_pxp_is_active(&to_gt(i915)->pxp)) 274 ret = intel_pxp_start(&to_gt(i915)->pxp); 275 } 276 277 return ret; 278 } 279 280 static struct i915_gem_proto_context * 281 proto_context_create(struct drm_i915_private *i915, unsigned int flags) 282 { 283 struct i915_gem_proto_context *pc, *err; 284 285 pc = kzalloc(sizeof(*pc), GFP_KERNEL); 286 if (!pc) 287 return ERR_PTR(-ENOMEM); 288 289 pc->num_user_engines = -1; 290 pc->user_engines = NULL; 291 pc->user_flags = BIT(UCONTEXT_BANNABLE) | 292 BIT(UCONTEXT_RECOVERABLE); 293 if (i915->params.enable_hangcheck) 294 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); 295 pc->sched.priority = I915_PRIORITY_NORMAL; 296 297 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { 298 if (!HAS_EXECLISTS(i915)) { 299 err = ERR_PTR(-EINVAL); 300 goto proto_close; 301 } 302 pc->single_timeline = true; 303 } 304 305 return pc; 306 307 proto_close: 308 proto_context_close(i915, pc); 309 return err; 310 } 311 312 static int proto_context_register_locked(struct drm_i915_file_private *fpriv, 313 struct i915_gem_proto_context *pc, 314 u32 *id) 315 { 316 int ret; 317 void *old; 318 319 lockdep_assert_held(&fpriv->proto_context_lock); 320 321 ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL); 322 if (ret) 323 return ret; 324 325 old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL); 326 if (xa_is_err(old)) { 327 xa_erase(&fpriv->context_xa, *id); 328 return xa_err(old); 329 } 330 WARN_ON(old); 331 332 return 0; 333 } 334 335 static int proto_context_register(struct drm_i915_file_private *fpriv, 336 struct i915_gem_proto_context *pc, 337 u32 *id) 338 { 339 int ret; 340 341 mutex_lock(&fpriv->proto_context_lock); 342 ret = proto_context_register_locked(fpriv, pc, id); 343 mutex_unlock(&fpriv->proto_context_lock); 344 345 return ret; 346 } 347 348 static struct i915_address_space * 349 i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id) 350 { 351 struct i915_address_space *vm; 352 353 xa_lock(&file_priv->vm_xa); 354 vm = xa_load(&file_priv->vm_xa, id); 355 if (vm) 356 kref_get(&vm->ref); 357 xa_unlock(&file_priv->vm_xa); 358 359 return vm; 360 } 361 362 static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv, 363 struct i915_gem_proto_context *pc, 364 const struct drm_i915_gem_context_param *args) 365 { 366 struct drm_i915_private *i915 = fpriv->dev_priv; 367 struct i915_address_space *vm; 368 369 if (args->size) 370 return -EINVAL; 371 372 if (!HAS_FULL_PPGTT(i915)) 373 return -ENODEV; 374 375 if (upper_32_bits(args->value)) 376 return -ENOENT; 377 378 vm = i915_gem_vm_lookup(fpriv, args->value); 379 if (!vm) 380 return -ENOENT; 381 382 if (pc->vm) 383 i915_vm_put(pc->vm); 384 pc->vm = vm; 385 386 return 0; 387 } 388 389 struct set_proto_ctx_engines { 390 struct drm_i915_private *i915; 391 unsigned num_engines; 392 struct i915_gem_proto_engine *engines; 393 }; 394 395 static int 396 set_proto_ctx_engines_balance(struct i915_user_extension __user *base, 397 void *data) 398 { 399 struct i915_context_engines_load_balance __user *ext = 400 container_of_user(base, typeof(*ext), base); 401 const struct set_proto_ctx_engines *set = data; 402 struct drm_i915_private *i915 = set->i915; 403 struct intel_engine_cs **siblings; 404 u16 num_siblings, idx; 405 unsigned int n; 406 int err; 407 408 if (!HAS_EXECLISTS(i915)) 409 return -ENODEV; 410 411 if (get_user(idx, &ext->engine_index)) 412 return -EFAULT; 413 414 if (idx >= set->num_engines) { 415 
drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", 416 idx, set->num_engines); 417 return -EINVAL; 418 } 419 420 idx = array_index_nospec(idx, set->num_engines); 421 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) { 422 drm_dbg(&i915->drm, 423 "Invalid placement[%d], already occupied\n", idx); 424 return -EEXIST; 425 } 426 427 if (get_user(num_siblings, &ext->num_siblings)) 428 return -EFAULT; 429 430 err = check_user_mbz(&ext->flags); 431 if (err) 432 return err; 433 434 err = check_user_mbz(&ext->mbz64); 435 if (err) 436 return err; 437 438 if (num_siblings == 0) 439 return 0; 440 441 siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL); 442 if (!siblings) 443 return -ENOMEM; 444 445 for (n = 0; n < num_siblings; n++) { 446 struct i915_engine_class_instance ci; 447 448 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { 449 err = -EFAULT; 450 goto err_siblings; 451 } 452 453 siblings[n] = intel_engine_lookup_user(i915, 454 ci.engine_class, 455 ci.engine_instance); 456 if (!siblings[n]) { 457 drm_dbg(&i915->drm, 458 "Invalid sibling[%d]: { class:%d, inst:%d }\n", 459 n, ci.engine_class, ci.engine_instance); 460 err = -EINVAL; 461 goto err_siblings; 462 } 463 } 464 465 if (num_siblings == 1) { 466 set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL; 467 set->engines[idx].engine = siblings[0]; 468 kfree(siblings); 469 } else { 470 set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED; 471 set->engines[idx].num_siblings = num_siblings; 472 set->engines[idx].siblings = siblings; 473 } 474 475 return 0; 476 477 err_siblings: 478 kfree(siblings); 479 480 return err; 481 } 482 483 static int 484 set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data) 485 { 486 struct i915_context_engines_bond __user *ext = 487 container_of_user(base, typeof(*ext), base); 488 const struct set_proto_ctx_engines *set = data; 489 struct drm_i915_private *i915 = set->i915; 490 struct i915_engine_class_instance ci; 491 struct intel_engine_cs *master; 492 u16 idx, num_bonds; 493 int err, n; 494 495 if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) && 496 !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) { 497 drm_dbg(&i915->drm, 498 "Bonding not supported on this platform\n"); 499 return -ENODEV; 500 } 501 502 if (get_user(idx, &ext->virtual_index)) 503 return -EFAULT; 504 505 if (idx >= set->num_engines) { 506 drm_dbg(&i915->drm, 507 "Invalid index for virtual engine: %d >= %d\n", 508 idx, set->num_engines); 509 return -EINVAL; 510 } 511 512 idx = array_index_nospec(idx, set->num_engines); 513 if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) { 514 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx); 515 return -EINVAL; 516 } 517 518 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) { 519 drm_dbg(&i915->drm, 520 "Bonding with virtual engines not allowed\n"); 521 return -EINVAL; 522 } 523 524 err = check_user_mbz(&ext->flags); 525 if (err) 526 return err; 527 528 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { 529 err = check_user_mbz(&ext->mbz64[n]); 530 if (err) 531 return err; 532 } 533 534 if (copy_from_user(&ci, &ext->master, sizeof(ci))) 535 return -EFAULT; 536 537 master = intel_engine_lookup_user(i915, 538 ci.engine_class, 539 ci.engine_instance); 540 if (!master) { 541 drm_dbg(&i915->drm, 542 "Unrecognised master engine: { class:%u, instance:%u }\n", 543 ci.engine_class, ci.engine_instance); 544 return -EINVAL; 545 } 546 547 if (intel_engine_uses_guc(master)) { 548 DRM_DEBUG("bonding extension not supported with GuC 
submission"); 549 return -ENODEV; 550 } 551 552 if (get_user(num_bonds, &ext->num_bonds)) 553 return -EFAULT; 554 555 for (n = 0; n < num_bonds; n++) { 556 struct intel_engine_cs *bond; 557 558 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) 559 return -EFAULT; 560 561 bond = intel_engine_lookup_user(i915, 562 ci.engine_class, 563 ci.engine_instance); 564 if (!bond) { 565 drm_dbg(&i915->drm, 566 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", 567 n, ci.engine_class, ci.engine_instance); 568 return -EINVAL; 569 } 570 } 571 572 return 0; 573 } 574 575 static int 576 set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base, 577 void *data) 578 { 579 struct i915_context_engines_parallel_submit __user *ext = 580 container_of_user(base, typeof(*ext), base); 581 const struct set_proto_ctx_engines *set = data; 582 struct drm_i915_private *i915 = set->i915; 583 struct i915_engine_class_instance prev_engine; 584 u64 flags; 585 int err = 0, n, i, j; 586 u16 slot, width, num_siblings; 587 struct intel_engine_cs **siblings = NULL; 588 intel_engine_mask_t prev_mask; 589 590 if (get_user(slot, &ext->engine_index)) 591 return -EFAULT; 592 593 if (get_user(width, &ext->width)) 594 return -EFAULT; 595 596 if (get_user(num_siblings, &ext->num_siblings)) 597 return -EFAULT; 598 599 if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) && 600 num_siblings != 1) { 601 drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n", 602 num_siblings); 603 return -EINVAL; 604 } 605 606 if (slot >= set->num_engines) { 607 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", 608 slot, set->num_engines); 609 return -EINVAL; 610 } 611 612 if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) { 613 drm_dbg(&i915->drm, 614 "Invalid placement[%d], already occupied\n", slot); 615 return -EINVAL; 616 } 617 618 if (get_user(flags, &ext->flags)) 619 return -EFAULT; 620 621 if (flags) { 622 drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags); 623 return -EINVAL; 624 } 625 626 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { 627 err = check_user_mbz(&ext->mbz64[n]); 628 if (err) 629 return err; 630 } 631 632 if (width < 2) { 633 drm_dbg(&i915->drm, "Width (%d) < 2\n", width); 634 return -EINVAL; 635 } 636 637 if (num_siblings < 1) { 638 drm_dbg(&i915->drm, "Number siblings (%d) < 1\n", 639 num_siblings); 640 return -EINVAL; 641 } 642 643 siblings = kmalloc_array(num_siblings * width, 644 sizeof(*siblings), 645 GFP_KERNEL); 646 if (!siblings) 647 return -ENOMEM; 648 649 /* Create contexts / engines */ 650 for (i = 0; i < width; ++i) { 651 intel_engine_mask_t current_mask = 0; 652 653 for (j = 0; j < num_siblings; ++j) { 654 struct i915_engine_class_instance ci; 655 656 n = i * num_siblings + j; 657 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { 658 err = -EFAULT; 659 goto out_err; 660 } 661 662 siblings[n] = 663 intel_engine_lookup_user(i915, ci.engine_class, 664 ci.engine_instance); 665 if (!siblings[n]) { 666 drm_dbg(&i915->drm, 667 "Invalid sibling[%d]: { class:%d, inst:%d }\n", 668 n, ci.engine_class, ci.engine_instance); 669 err = -EINVAL; 670 goto out_err; 671 } 672 673 if (n) { 674 if (prev_engine.engine_class != 675 ci.engine_class) { 676 drm_dbg(&i915->drm, 677 "Mismatched class %d, %d\n", 678 prev_engine.engine_class, 679 ci.engine_class); 680 err = -EINVAL; 681 goto out_err; 682 } 683 } 684 685 prev_engine = ci; 686 current_mask |= siblings[n]->logical_mask; 687 } 688 689 if (i > 0) { 690 if (current_mask != prev_mask << 1) { 691 
drm_dbg(&i915->drm, 692 "Non contiguous logical mask 0x%x, 0x%x\n", 693 prev_mask, current_mask); 694 err = -EINVAL; 695 goto out_err; 696 } 697 } 698 prev_mask = current_mask; 699 } 700 701 set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL; 702 set->engines[slot].num_siblings = num_siblings; 703 set->engines[slot].width = width; 704 set->engines[slot].siblings = siblings; 705 706 return 0; 707 708 out_err: 709 kfree(siblings); 710 711 return err; 712 } 713 714 static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = { 715 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance, 716 [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond, 717 [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] = 718 set_proto_ctx_engines_parallel_submit, 719 }; 720 721 static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv, 722 struct i915_gem_proto_context *pc, 723 const struct drm_i915_gem_context_param *args) 724 { 725 struct drm_i915_private *i915 = fpriv->dev_priv; 726 struct set_proto_ctx_engines set = { .i915 = i915 }; 727 struct i915_context_param_engines __user *user = 728 u64_to_user_ptr(args->value); 729 unsigned int n; 730 u64 extensions; 731 int err; 732 733 if (pc->num_user_engines >= 0) { 734 drm_dbg(&i915->drm, "Cannot set engines twice"); 735 return -EINVAL; 736 } 737 738 if (args->size < sizeof(*user) || 739 !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) { 740 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n", 741 args->size); 742 return -EINVAL; 743 } 744 745 set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); 746 /* RING_MASK has no shift so we can use it directly here */ 747 if (set.num_engines > I915_EXEC_RING_MASK + 1) 748 return -EINVAL; 749 750 set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL); 751 if (!set.engines) 752 return -ENOMEM; 753 754 for (n = 0; n < set.num_engines; n++) { 755 struct i915_engine_class_instance ci; 756 struct intel_engine_cs *engine; 757 758 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { 759 kfree(set.engines); 760 return -EFAULT; 761 } 762 763 memset(&set.engines[n], 0, sizeof(set.engines[n])); 764 765 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && 766 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) 767 continue; 768 769 engine = intel_engine_lookup_user(i915, 770 ci.engine_class, 771 ci.engine_instance); 772 if (!engine) { 773 drm_dbg(&i915->drm, 774 "Invalid engine[%d]: { class:%d, instance:%d }\n", 775 n, ci.engine_class, ci.engine_instance); 776 kfree(set.engines); 777 return -ENOENT; 778 } 779 780 set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL; 781 set.engines[n].engine = engine; 782 } 783 784 err = -EFAULT; 785 if (!get_user(extensions, &user->extensions)) 786 err = i915_user_extensions(u64_to_user_ptr(extensions), 787 set_proto_ctx_engines_extensions, 788 ARRAY_SIZE(set_proto_ctx_engines_extensions), 789 &set); 790 if (err) { 791 kfree(set.engines); 792 return err; 793 } 794 795 pc->num_user_engines = set.num_engines; 796 pc->user_engines = set.engines; 797 798 return 0; 799 } 800 801 static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv, 802 struct i915_gem_proto_context *pc, 803 struct drm_i915_gem_context_param *args) 804 { 805 struct drm_i915_private *i915 = fpriv->dev_priv; 806 struct drm_i915_gem_context_param_sseu user_sseu; 807 struct intel_sseu *sseu; 808 int ret; 809 810 if (args->size < sizeof(user_sseu)) 811 return -EINVAL; 812 813 if (GRAPHICS_VER(i915) != 11) 814 
return -ENODEV; 815 816 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 817 sizeof(user_sseu))) 818 return -EFAULT; 819 820 if (user_sseu.rsvd) 821 return -EINVAL; 822 823 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 824 return -EINVAL; 825 826 if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0)) 827 return -EINVAL; 828 829 if (pc->num_user_engines >= 0) { 830 int idx = user_sseu.engine.engine_instance; 831 struct i915_gem_proto_engine *pe; 832 833 if (idx >= pc->num_user_engines) 834 return -EINVAL; 835 836 pe = &pc->user_engines[idx]; 837 838 /* Only render engine supports RPCS configuration. */ 839 if (pe->engine->class != RENDER_CLASS) 840 return -EINVAL; 841 842 sseu = &pe->sseu; 843 } else { 844 /* Only render engine supports RPCS configuration. */ 845 if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER) 846 return -EINVAL; 847 848 /* There is only one render engine */ 849 if (user_sseu.engine.engine_instance != 0) 850 return -EINVAL; 851 852 sseu = &pc->legacy_rcs_sseu; 853 } 854 855 ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu); 856 if (ret) 857 return ret; 858 859 args->size = sizeof(user_sseu); 860 861 return 0; 862 } 863 864 static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, 865 struct i915_gem_proto_context *pc, 866 struct drm_i915_gem_context_param *args) 867 { 868 int ret = 0; 869 870 switch (args->param) { 871 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 872 if (args->size) 873 ret = -EINVAL; 874 else if (args->value) 875 pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE); 876 else 877 pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE); 878 break; 879 880 case I915_CONTEXT_PARAM_BANNABLE: 881 if (args->size) 882 ret = -EINVAL; 883 else if (!capable(CAP_SYS_ADMIN) && !args->value) 884 ret = -EPERM; 885 else if (args->value) 886 pc->user_flags |= BIT(UCONTEXT_BANNABLE); 887 else if (pc->uses_protected_content) 888 ret = -EPERM; 889 else 890 pc->user_flags &= ~BIT(UCONTEXT_BANNABLE); 891 break; 892 893 case I915_CONTEXT_PARAM_RECOVERABLE: 894 if (args->size) 895 ret = -EINVAL; 896 else if (!args->value) 897 pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE); 898 else if (pc->uses_protected_content) 899 ret = -EPERM; 900 else 901 pc->user_flags |= BIT(UCONTEXT_RECOVERABLE); 902 break; 903 904 case I915_CONTEXT_PARAM_PRIORITY: 905 ret = validate_priority(fpriv->dev_priv, args); 906 if (!ret) 907 pc->sched.priority = args->value; 908 break; 909 910 case I915_CONTEXT_PARAM_SSEU: 911 ret = set_proto_ctx_sseu(fpriv, pc, args); 912 break; 913 914 case I915_CONTEXT_PARAM_VM: 915 ret = set_proto_ctx_vm(fpriv, pc, args); 916 break; 917 918 case I915_CONTEXT_PARAM_ENGINES: 919 ret = set_proto_ctx_engines(fpriv, pc, args); 920 break; 921 922 case I915_CONTEXT_PARAM_PERSISTENCE: 923 if (args->size) 924 ret = -EINVAL; 925 ret = proto_context_set_persistence(fpriv->dev_priv, pc, 926 args->value); 927 break; 928 929 case I915_CONTEXT_PARAM_PROTECTED_CONTENT: 930 ret = proto_context_set_protected(fpriv->dev_priv, pc, 931 args->value); 932 break; 933 934 case I915_CONTEXT_PARAM_NO_ZEROMAP: 935 case I915_CONTEXT_PARAM_BAN_PERIOD: 936 case I915_CONTEXT_PARAM_RINGSIZE: 937 default: 938 ret = -EINVAL; 939 break; 940 } 941 942 return ret; 943 } 944 945 static int intel_context_set_gem(struct intel_context *ce, 946 struct i915_gem_context *ctx, 947 struct intel_sseu sseu) 948 { 949 int ret = 0; 950 951 GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); 952 RCU_INIT_POINTER(ce->gem_context, ctx); 
953 954 GEM_BUG_ON(intel_context_is_pinned(ce)); 955 ce->ring_size = SZ_16K; 956 957 i915_vm_put(ce->vm); 958 ce->vm = i915_gem_context_get_eb_vm(ctx); 959 960 if (ctx->sched.priority >= I915_PRIORITY_NORMAL && 961 intel_engine_has_timeslices(ce->engine) && 962 intel_engine_has_semaphores(ce->engine)) 963 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); 964 965 if (CONFIG_DRM_I915_REQUEST_TIMEOUT && 966 ctx->i915->params.request_timeout_ms) { 967 unsigned int timeout_ms = ctx->i915->params.request_timeout_ms; 968 969 intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000); 970 } 971 972 /* A valid SSEU has no zero fields */ 973 if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS)) 974 ret = intel_context_reconfigure_sseu(ce, sseu); 975 976 return ret; 977 } 978 979 static void __unpin_engines(struct i915_gem_engines *e, unsigned int count) 980 { 981 while (count--) { 982 struct intel_context *ce = e->engines[count], *child; 983 984 if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags)) 985 continue; 986 987 for_each_child(ce, child) 988 intel_context_unpin(child); 989 intel_context_unpin(ce); 990 } 991 } 992 993 static void unpin_engines(struct i915_gem_engines *e) 994 { 995 __unpin_engines(e, e->num_engines); 996 } 997 998 static void __free_engines(struct i915_gem_engines *e, unsigned int count) 999 { 1000 while (count--) { 1001 if (!e->engines[count]) 1002 continue; 1003 1004 intel_context_put(e->engines[count]); 1005 } 1006 kfree(e); 1007 } 1008 1009 static void free_engines(struct i915_gem_engines *e) 1010 { 1011 __free_engines(e, e->num_engines); 1012 } 1013 1014 static void free_engines_rcu(struct rcu_head *rcu) 1015 { 1016 struct i915_gem_engines *engines = 1017 container_of(rcu, struct i915_gem_engines, rcu); 1018 1019 i915_sw_fence_fini(&engines->fence); 1020 free_engines(engines); 1021 } 1022 1023 static int 1024 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) 1025 { 1026 struct i915_gem_engines *engines = 1027 container_of(fence, typeof(*engines), fence); 1028 1029 switch (state) { 1030 case FENCE_COMPLETE: 1031 if (!list_empty(&engines->link)) { 1032 struct i915_gem_context *ctx = engines->ctx; 1033 unsigned long flags; 1034 1035 spin_lock_irqsave(&ctx->stale.lock, flags); 1036 list_del(&engines->link); 1037 spin_unlock_irqrestore(&ctx->stale.lock, flags); 1038 } 1039 i915_gem_context_put(engines->ctx); 1040 break; 1041 1042 case FENCE_FREE: 1043 init_rcu_head(&engines->rcu); 1044 call_rcu(&engines->rcu, free_engines_rcu); 1045 break; 1046 } 1047 1048 return NOTIFY_DONE; 1049 } 1050 1051 static struct i915_gem_engines *alloc_engines(unsigned int count) 1052 { 1053 struct i915_gem_engines *e; 1054 1055 e = kzalloc(struct_size(e, engines, count), GFP_KERNEL); 1056 if (!e) 1057 return NULL; 1058 1059 i915_sw_fence_init(&e->fence, engines_notify); 1060 return e; 1061 } 1062 1063 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx, 1064 struct intel_sseu rcs_sseu) 1065 { 1066 const struct intel_gt *gt = to_gt(ctx->i915); 1067 struct intel_engine_cs *engine; 1068 struct i915_gem_engines *e, *err; 1069 enum intel_engine_id id; 1070 1071 e = alloc_engines(I915_NUM_ENGINES); 1072 if (!e) 1073 return ERR_PTR(-ENOMEM); 1074 1075 for_each_engine(engine, gt, id) { 1076 struct intel_context *ce; 1077 struct intel_sseu sseu = {}; 1078 int ret; 1079 1080 if (engine->legacy_idx == INVALID_ENGINE) 1081 continue; 1082 1083 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES); 1084 GEM_BUG_ON(e->engines[engine->legacy_idx]); 1085 
1086 ce = intel_context_create(engine); 1087 if (IS_ERR(ce)) { 1088 err = ERR_CAST(ce); 1089 goto free_engines; 1090 } 1091 1092 e->engines[engine->legacy_idx] = ce; 1093 e->num_engines = max(e->num_engines, engine->legacy_idx + 1); 1094 1095 if (engine->class == RENDER_CLASS) 1096 sseu = rcs_sseu; 1097 1098 ret = intel_context_set_gem(ce, ctx, sseu); 1099 if (ret) { 1100 err = ERR_PTR(ret); 1101 goto free_engines; 1102 } 1103 1104 } 1105 1106 return e; 1107 1108 free_engines: 1109 free_engines(e); 1110 return err; 1111 } 1112 1113 static int perma_pin_contexts(struct intel_context *ce) 1114 { 1115 struct intel_context *child; 1116 int i = 0, j = 0, ret; 1117 1118 GEM_BUG_ON(!intel_context_is_parent(ce)); 1119 1120 ret = intel_context_pin(ce); 1121 if (unlikely(ret)) 1122 return ret; 1123 1124 for_each_child(ce, child) { 1125 ret = intel_context_pin(child); 1126 if (unlikely(ret)) 1127 goto unwind; 1128 ++i; 1129 } 1130 1131 set_bit(CONTEXT_PERMA_PIN, &ce->flags); 1132 1133 return 0; 1134 1135 unwind: 1136 intel_context_unpin(ce); 1137 for_each_child(ce, child) { 1138 if (j++ < i) 1139 intel_context_unpin(child); 1140 else 1141 break; 1142 } 1143 1144 return ret; 1145 } 1146 1147 static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx, 1148 unsigned int num_engines, 1149 struct i915_gem_proto_engine *pe) 1150 { 1151 struct i915_gem_engines *e, *err; 1152 unsigned int n; 1153 1154 e = alloc_engines(num_engines); 1155 if (!e) 1156 return ERR_PTR(-ENOMEM); 1157 e->num_engines = num_engines; 1158 1159 for (n = 0; n < num_engines; n++) { 1160 struct intel_context *ce, *child; 1161 int ret; 1162 1163 switch (pe[n].type) { 1164 case I915_GEM_ENGINE_TYPE_PHYSICAL: 1165 ce = intel_context_create(pe[n].engine); 1166 break; 1167 1168 case I915_GEM_ENGINE_TYPE_BALANCED: 1169 ce = intel_engine_create_virtual(pe[n].siblings, 1170 pe[n].num_siblings, 0); 1171 break; 1172 1173 case I915_GEM_ENGINE_TYPE_PARALLEL: 1174 ce = intel_engine_create_parallel(pe[n].siblings, 1175 pe[n].num_siblings, 1176 pe[n].width); 1177 break; 1178 1179 case I915_GEM_ENGINE_TYPE_INVALID: 1180 default: 1181 GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID); 1182 continue; 1183 } 1184 1185 if (IS_ERR(ce)) { 1186 err = ERR_CAST(ce); 1187 goto free_engines; 1188 } 1189 1190 e->engines[n] = ce; 1191 1192 ret = intel_context_set_gem(ce, ctx, pe->sseu); 1193 if (ret) { 1194 err = ERR_PTR(ret); 1195 goto free_engines; 1196 } 1197 for_each_child(ce, child) { 1198 ret = intel_context_set_gem(child, ctx, pe->sseu); 1199 if (ret) { 1200 err = ERR_PTR(ret); 1201 goto free_engines; 1202 } 1203 } 1204 1205 /* 1206 * XXX: Must be done after calling intel_context_set_gem as that 1207 * function changes the ring size. The ring is allocated when 1208 * the context is pinned. If the ring size is changed after 1209 * allocation we have a mismatch of the ring size and will cause 1210 * the context to hang. Presumably with a bit of reordering we 1211 * could move the perma-pin step to the backend function 1212 * intel_engine_create_parallel. 
1213 */ 1214 if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) { 1215 ret = perma_pin_contexts(ce); 1216 if (ret) { 1217 err = ERR_PTR(ret); 1218 goto free_engines; 1219 } 1220 } 1221 } 1222 1223 return e; 1224 1225 free_engines: 1226 free_engines(e); 1227 return err; 1228 } 1229 1230 static void i915_gem_context_release_work(struct work_struct *work) 1231 { 1232 struct i915_gem_context *ctx = container_of(work, typeof(*ctx), 1233 release_work); 1234 struct i915_address_space *vm; 1235 1236 trace_i915_context_free(ctx); 1237 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 1238 1239 if (ctx->syncobj) 1240 drm_syncobj_put(ctx->syncobj); 1241 1242 vm = ctx->vm; 1243 if (vm) 1244 i915_vm_put(vm); 1245 1246 if (ctx->pxp_wakeref) 1247 intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref); 1248 1249 mutex_destroy(&ctx->engines_mutex); 1250 mutex_destroy(&ctx->lut_mutex); 1251 1252 put_pid(ctx->pid); 1253 mutex_destroy(&ctx->mutex); 1254 1255 kfree_rcu(ctx, rcu); 1256 } 1257 1258 void i915_gem_context_release(struct kref *ref) 1259 { 1260 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); 1261 1262 queue_work(ctx->i915->wq, &ctx->release_work); 1263 } 1264 1265 static inline struct i915_gem_engines * 1266 __context_engines_static(const struct i915_gem_context *ctx) 1267 { 1268 return rcu_dereference_protected(ctx->engines, true); 1269 } 1270 1271 static void __reset_context(struct i915_gem_context *ctx, 1272 struct intel_engine_cs *engine) 1273 { 1274 intel_gt_handle_error(engine->gt, engine->mask, 0, 1275 "context closure in %s", ctx->name); 1276 } 1277 1278 static bool __cancel_engine(struct intel_engine_cs *engine) 1279 { 1280 /* 1281 * Send a "high priority pulse" down the engine to cause the 1282 * current request to be momentarily preempted. (If it fails to 1283 * be preempted, it will be reset). As we have marked our context 1284 * as banned, any incomplete request, including any running, will 1285 * be skipped following the preemption. 1286 * 1287 * If there is no hangchecking (one of the reasons why we try to 1288 * cancel the context) and no forced preemption, there may be no 1289 * means by which we reset the GPU and evict the persistent hog. 1290 * Ergo if we are unable to inject a preemptive pulse that can 1291 * kill the banned context, we fallback to doing a local reset 1292 * instead. 1293 */ 1294 return intel_engine_pulse(engine) == 0; 1295 } 1296 1297 static struct intel_engine_cs *active_engine(struct intel_context *ce) 1298 { 1299 struct intel_engine_cs *engine = NULL; 1300 struct i915_request *rq; 1301 1302 if (intel_context_has_inflight(ce)) 1303 return intel_context_inflight(ce); 1304 1305 if (!ce->timeline) 1306 return NULL; 1307 1308 /* 1309 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference 1310 * to the request to prevent it being transferred to a new timeline 1311 * (and onto a new timeline->requests list). 1312 */ 1313 rcu_read_lock(); 1314 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { 1315 bool found; 1316 1317 /* timeline is already completed upto this point? 
*/ 1318 if (!i915_request_get_rcu(rq)) 1319 break; 1320 1321 /* Check with the backend if the request is inflight */ 1322 found = true; 1323 if (likely(rcu_access_pointer(rq->timeline) == ce->timeline)) 1324 found = i915_request_active_engine(rq, &engine); 1325 1326 i915_request_put(rq); 1327 if (found) 1328 break; 1329 } 1330 rcu_read_unlock(); 1331 1332 return engine; 1333 } 1334 1335 static void kill_engines(struct i915_gem_engines *engines, bool ban) 1336 { 1337 struct i915_gem_engines_iter it; 1338 struct intel_context *ce; 1339 1340 /* 1341 * Map the user's engine back to the actual engines; one virtual 1342 * engine will be mapped to multiple engines, and using ctx->engine[] 1343 * the same engine may be have multiple instances in the user's map. 1344 * However, we only care about pending requests, so only include 1345 * engines on which there are incomplete requests. 1346 */ 1347 for_each_gem_engine(ce, engines, it) { 1348 struct intel_engine_cs *engine; 1349 1350 if (ban && intel_context_ban(ce, NULL)) 1351 continue; 1352 1353 /* 1354 * Check the current active state of this context; if we 1355 * are currently executing on the GPU we need to evict 1356 * ourselves. On the other hand, if we haven't yet been 1357 * submitted to the GPU or if everything is complete, 1358 * we have nothing to do. 1359 */ 1360 engine = active_engine(ce); 1361 1362 /* First attempt to gracefully cancel the context */ 1363 if (engine && !__cancel_engine(engine) && ban) 1364 /* 1365 * If we are unable to send a preemptive pulse to bump 1366 * the context from the GPU, we have to resort to a full 1367 * reset. We hope the collateral damage is worth it. 1368 */ 1369 __reset_context(engines->ctx, engine); 1370 } 1371 } 1372 1373 static void kill_context(struct i915_gem_context *ctx) 1374 { 1375 bool ban = (!i915_gem_context_is_persistent(ctx) || 1376 !ctx->i915->params.enable_hangcheck); 1377 struct i915_gem_engines *pos, *next; 1378 1379 spin_lock_irq(&ctx->stale.lock); 1380 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 1381 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) { 1382 if (!i915_sw_fence_await(&pos->fence)) { 1383 list_del_init(&pos->link); 1384 continue; 1385 } 1386 1387 spin_unlock_irq(&ctx->stale.lock); 1388 1389 kill_engines(pos, ban); 1390 1391 spin_lock_irq(&ctx->stale.lock); 1392 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); 1393 list_safe_reset_next(pos, next, link); 1394 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */ 1395 1396 i915_sw_fence_complete(&pos->fence); 1397 } 1398 spin_unlock_irq(&ctx->stale.lock); 1399 } 1400 1401 static void engines_idle_release(struct i915_gem_context *ctx, 1402 struct i915_gem_engines *engines) 1403 { 1404 struct i915_gem_engines_iter it; 1405 struct intel_context *ce; 1406 1407 INIT_LIST_HEAD(&engines->link); 1408 1409 engines->ctx = i915_gem_context_get(ctx); 1410 1411 for_each_gem_engine(ce, engines, it) { 1412 int err; 1413 1414 /* serialises with execbuf */ 1415 set_bit(CONTEXT_CLOSED_BIT, &ce->flags); 1416 if (!intel_context_pin_if_active(ce)) 1417 continue; 1418 1419 /* Wait until context is finally scheduled out and retired */ 1420 err = i915_sw_fence_await_active(&engines->fence, 1421 &ce->active, 1422 I915_ACTIVE_AWAIT_BARRIER); 1423 intel_context_unpin(ce); 1424 if (err) 1425 goto kill; 1426 } 1427 1428 spin_lock_irq(&ctx->stale.lock); 1429 if (!i915_gem_context_is_closed(ctx)) 1430 list_add_tail(&engines->link, &ctx->stale.engines); 1431 spin_unlock_irq(&ctx->stale.lock); 1432 1433 kill: 1434 if 
(list_empty(&engines->link)) /* raced, already closed */ 1435 kill_engines(engines, true); 1436 1437 i915_sw_fence_commit(&engines->fence); 1438 } 1439 1440 static void set_closed_name(struct i915_gem_context *ctx) 1441 { 1442 char *s; 1443 1444 /* Replace '[]' with '<>' to indicate closed in debug prints */ 1445 1446 s = strrchr(ctx->name, '['); 1447 if (!s) 1448 return; 1449 1450 *s = '<'; 1451 1452 s = strchr(s + 1, ']'); 1453 if (s) 1454 *s = '>'; 1455 } 1456 1457 static void context_close(struct i915_gem_context *ctx) 1458 { 1459 struct i915_address_space *vm; 1460 1461 /* Flush any concurrent set_engines() */ 1462 mutex_lock(&ctx->engines_mutex); 1463 unpin_engines(__context_engines_static(ctx)); 1464 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); 1465 i915_gem_context_set_closed(ctx); 1466 mutex_unlock(&ctx->engines_mutex); 1467 1468 mutex_lock(&ctx->mutex); 1469 1470 set_closed_name(ctx); 1471 1472 vm = ctx->vm; 1473 if (vm) { 1474 /* i915_vm_close drops the final reference, which is a bit too 1475 * early and could result in surprises with concurrent 1476 * operations racing with thist ctx close. Keep a full reference 1477 * until the end. 1478 */ 1479 i915_vm_get(vm); 1480 i915_vm_close(vm); 1481 } 1482 1483 ctx->file_priv = ERR_PTR(-EBADF); 1484 1485 /* 1486 * The LUT uses the VMA as a backpointer to unref the object, 1487 * so we need to clear the LUT before we close all the VMA (inside 1488 * the ppgtt). 1489 */ 1490 lut_close(ctx); 1491 1492 spin_lock(&ctx->i915->gem.contexts.lock); 1493 list_del(&ctx->link); 1494 spin_unlock(&ctx->i915->gem.contexts.lock); 1495 1496 mutex_unlock(&ctx->mutex); 1497 1498 /* 1499 * If the user has disabled hangchecking, we can not be sure that 1500 * the batches will ever complete after the context is closed, 1501 * keeping the context and all resources pinned forever. So in this 1502 * case we opt to forcibly kill off all remaining requests on 1503 * context close. 1504 */ 1505 kill_context(ctx); 1506 1507 i915_gem_context_put(ctx); 1508 } 1509 1510 static int __context_set_persistence(struct i915_gem_context *ctx, bool state) 1511 { 1512 if (i915_gem_context_is_persistent(ctx) == state) 1513 return 0; 1514 1515 if (state) { 1516 /* 1517 * Only contexts that are short-lived [that will expire or be 1518 * reset] are allowed to survive past termination. We require 1519 * hangcheck to ensure that the persistent requests are healthy. 1520 */ 1521 if (!ctx->i915->params.enable_hangcheck) 1522 return -EINVAL; 1523 1524 i915_gem_context_set_persistence(ctx); 1525 } else { 1526 /* To cancel a context we use "preempt-to-idle" */ 1527 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) 1528 return -ENODEV; 1529 1530 /* 1531 * If the cancel fails, we then need to reset, cleanly! 1532 * 1533 * If the per-engine reset fails, all hope is lost! We resort 1534 * to a full GPU reset in that unlikely case, but realistically 1535 * if the engine could not reset, the full reset does not fare 1536 * much better. The damage has been done. 1537 * 1538 * However, if we cannot reset an engine by itself, we cannot 1539 * cleanup a hanging persistent context without causing 1540 * colateral damage, and we should not pretend we can by 1541 * exposing the interface. 
1542 */ 1543 if (!intel_has_reset_engine(to_gt(ctx->i915))) 1544 return -ENODEV; 1545 1546 i915_gem_context_clear_persistence(ctx); 1547 } 1548 1549 return 0; 1550 } 1551 1552 static struct i915_gem_context * 1553 i915_gem_create_context(struct drm_i915_private *i915, 1554 const struct i915_gem_proto_context *pc) 1555 { 1556 struct i915_gem_context *ctx; 1557 struct i915_address_space *vm = NULL; 1558 struct i915_gem_engines *e; 1559 int err; 1560 int i; 1561 1562 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 1563 if (!ctx) 1564 return ERR_PTR(-ENOMEM); 1565 1566 kref_init(&ctx->ref); 1567 ctx->i915 = i915; 1568 ctx->sched = pc->sched; 1569 mutex_init(&ctx->mutex); 1570 INIT_LIST_HEAD(&ctx->link); 1571 INIT_WORK(&ctx->release_work, i915_gem_context_release_work); 1572 1573 spin_lock_init(&ctx->stale.lock); 1574 INIT_LIST_HEAD(&ctx->stale.engines); 1575 1576 if (pc->vm) { 1577 vm = i915_vm_get(pc->vm); 1578 } else if (HAS_FULL_PPGTT(i915)) { 1579 struct i915_ppgtt *ppgtt; 1580 1581 ppgtt = i915_ppgtt_create(to_gt(i915), 0); 1582 if (IS_ERR(ppgtt)) { 1583 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", 1584 PTR_ERR(ppgtt)); 1585 err = PTR_ERR(ppgtt); 1586 goto err_ctx; 1587 } 1588 vm = &ppgtt->vm; 1589 } 1590 if (vm) { 1591 ctx->vm = i915_vm_open(vm); 1592 1593 /* i915_vm_open() takes a reference */ 1594 i915_vm_put(vm); 1595 } 1596 1597 mutex_init(&ctx->engines_mutex); 1598 if (pc->num_user_engines >= 0) { 1599 i915_gem_context_set_user_engines(ctx); 1600 e = user_engines(ctx, pc->num_user_engines, pc->user_engines); 1601 } else { 1602 i915_gem_context_clear_user_engines(ctx); 1603 e = default_engines(ctx, pc->legacy_rcs_sseu); 1604 } 1605 if (IS_ERR(e)) { 1606 err = PTR_ERR(e); 1607 goto err_vm; 1608 } 1609 RCU_INIT_POINTER(ctx->engines, e); 1610 1611 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); 1612 mutex_init(&ctx->lut_mutex); 1613 1614 /* NB: Mark all slices as needing a remap so that when the context first 1615 * loads it will restore whatever remap state already exists. If there 1616 * is no remap info, it will be a NOP. 
*/ 1617 ctx->remap_slice = ALL_L3_SLICES(i915); 1618 1619 ctx->user_flags = pc->user_flags; 1620 1621 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) 1622 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; 1623 1624 if (pc->single_timeline) { 1625 err = drm_syncobj_create(&ctx->syncobj, 1626 DRM_SYNCOBJ_CREATE_SIGNALED, 1627 NULL); 1628 if (err) 1629 goto err_engines; 1630 } 1631 1632 if (pc->uses_protected_content) { 1633 ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); 1634 ctx->uses_protected_content = true; 1635 } 1636 1637 trace_i915_context_create(ctx); 1638 1639 return ctx; 1640 1641 err_engines: 1642 free_engines(e); 1643 err_vm: 1644 if (ctx->vm) 1645 i915_vm_close(ctx->vm); 1646 err_ctx: 1647 kfree(ctx); 1648 return ERR_PTR(err); 1649 } 1650 1651 static void init_contexts(struct i915_gem_contexts *gc) 1652 { 1653 spin_lock_init(&gc->lock); 1654 INIT_LIST_HEAD(&gc->list); 1655 } 1656 1657 void i915_gem_init__contexts(struct drm_i915_private *i915) 1658 { 1659 init_contexts(&i915->gem.contexts); 1660 } 1661 1662 static void gem_context_register(struct i915_gem_context *ctx, 1663 struct drm_i915_file_private *fpriv, 1664 u32 id) 1665 { 1666 struct drm_i915_private *i915 = ctx->i915; 1667 void *old; 1668 1669 ctx->file_priv = fpriv; 1670 1671 ctx->pid = get_task_pid(current, PIDTYPE_PID); 1672 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 1673 current->comm, pid_nr(ctx->pid)); 1674 1675 /* And finally expose ourselves to userspace via the idr */ 1676 old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL); 1677 WARN_ON(old); 1678 1679 spin_lock(&i915->gem.contexts.lock); 1680 list_add_tail(&ctx->link, &i915->gem.contexts.list); 1681 spin_unlock(&i915->gem.contexts.lock); 1682 } 1683 1684 int i915_gem_context_open(struct drm_i915_private *i915, 1685 struct drm_file *file) 1686 { 1687 struct drm_i915_file_private *file_priv = file->driver_priv; 1688 struct i915_gem_proto_context *pc; 1689 struct i915_gem_context *ctx; 1690 int err; 1691 1692 mutex_init(&file_priv->proto_context_lock); 1693 xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC); 1694 1695 /* 0 reserved for the default context */ 1696 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1); 1697 1698 /* 0 reserved for invalid/unassigned ppgtt */ 1699 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); 1700 1701 pc = proto_context_create(i915, 0); 1702 if (IS_ERR(pc)) { 1703 err = PTR_ERR(pc); 1704 goto err; 1705 } 1706 1707 ctx = i915_gem_create_context(i915, pc); 1708 proto_context_close(i915, pc); 1709 if (IS_ERR(ctx)) { 1710 err = PTR_ERR(ctx); 1711 goto err; 1712 } 1713 1714 gem_context_register(ctx, file_priv, 0); 1715 1716 return 0; 1717 1718 err: 1719 xa_destroy(&file_priv->vm_xa); 1720 xa_destroy(&file_priv->context_xa); 1721 xa_destroy(&file_priv->proto_context_xa); 1722 mutex_destroy(&file_priv->proto_context_lock); 1723 return err; 1724 } 1725 1726 void i915_gem_context_close(struct drm_file *file) 1727 { 1728 struct drm_i915_file_private *file_priv = file->driver_priv; 1729 struct i915_gem_proto_context *pc; 1730 struct i915_address_space *vm; 1731 struct i915_gem_context *ctx; 1732 unsigned long idx; 1733 1734 xa_for_each(&file_priv->proto_context_xa, idx, pc) 1735 proto_context_close(file_priv->dev_priv, pc); 1736 xa_destroy(&file_priv->proto_context_xa); 1737 mutex_destroy(&file_priv->proto_context_lock); 1738 1739 xa_for_each(&file_priv->context_xa, idx, ctx) 1740 context_close(ctx); 1741 xa_destroy(&file_priv->context_xa); 1742 1743 xa_for_each(&file_priv->vm_xa, idx, 
vm) 1744 i915_vm_put(vm); 1745 xa_destroy(&file_priv->vm_xa); 1746 } 1747 1748 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, 1749 struct drm_file *file) 1750 { 1751 struct drm_i915_private *i915 = to_i915(dev); 1752 struct drm_i915_gem_vm_control *args = data; 1753 struct drm_i915_file_private *file_priv = file->driver_priv; 1754 struct i915_ppgtt *ppgtt; 1755 u32 id; 1756 int err; 1757 1758 if (!HAS_FULL_PPGTT(i915)) 1759 return -ENODEV; 1760 1761 if (args->flags) 1762 return -EINVAL; 1763 1764 ppgtt = i915_ppgtt_create(to_gt(i915), 0); 1765 if (IS_ERR(ppgtt)) 1766 return PTR_ERR(ppgtt); 1767 1768 if (args->extensions) { 1769 err = i915_user_extensions(u64_to_user_ptr(args->extensions), 1770 NULL, 0, 1771 ppgtt); 1772 if (err) 1773 goto err_put; 1774 } 1775 1776 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, 1777 xa_limit_32b, GFP_KERNEL); 1778 if (err) 1779 goto err_put; 1780 1781 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 1782 args->vm_id = id; 1783 return 0; 1784 1785 err_put: 1786 i915_vm_put(&ppgtt->vm); 1787 return err; 1788 } 1789 1790 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, 1791 struct drm_file *file) 1792 { 1793 struct drm_i915_file_private *file_priv = file->driver_priv; 1794 struct drm_i915_gem_vm_control *args = data; 1795 struct i915_address_space *vm; 1796 1797 if (args->flags) 1798 return -EINVAL; 1799 1800 if (args->extensions) 1801 return -EINVAL; 1802 1803 vm = xa_erase(&file_priv->vm_xa, args->vm_id); 1804 if (!vm) 1805 return -ENOENT; 1806 1807 i915_vm_put(vm); 1808 return 0; 1809 } 1810 1811 static int get_ppgtt(struct drm_i915_file_private *file_priv, 1812 struct i915_gem_context *ctx, 1813 struct drm_i915_gem_context_param *args) 1814 { 1815 struct i915_address_space *vm; 1816 int err; 1817 u32 id; 1818 1819 if (!i915_gem_context_has_full_ppgtt(ctx)) 1820 return -ENODEV; 1821 1822 vm = ctx->vm; 1823 GEM_BUG_ON(!vm); 1824 1825 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); 1826 if (err) 1827 return err; 1828 1829 i915_vm_open(vm); 1830 1831 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 1832 args->value = id; 1833 args->size = 0; 1834 1835 return err; 1836 } 1837 1838 int 1839 i915_gem_user_to_context_sseu(struct intel_gt *gt, 1840 const struct drm_i915_gem_context_param_sseu *user, 1841 struct intel_sseu *context) 1842 { 1843 const struct sseu_dev_info *device = >->info.sseu; 1844 struct drm_i915_private *i915 = gt->i915; 1845 1846 /* No zeros in any field. */ 1847 if (!user->slice_mask || !user->subslice_mask || 1848 !user->min_eus_per_subslice || !user->max_eus_per_subslice) 1849 return -EINVAL; 1850 1851 /* Max > min. */ 1852 if (user->max_eus_per_subslice < user->min_eus_per_subslice) 1853 return -EINVAL; 1854 1855 /* 1856 * Some future proofing on the types since the uAPI is wider than the 1857 * current internal implementation. 1858 */ 1859 if (overflows_type(user->slice_mask, context->slice_mask) || 1860 overflows_type(user->subslice_mask, context->subslice_mask) || 1861 overflows_type(user->min_eus_per_subslice, 1862 context->min_eus_per_subslice) || 1863 overflows_type(user->max_eus_per_subslice, 1864 context->max_eus_per_subslice)) 1865 return -EINVAL; 1866 1867 /* Check validity against hardware. 
*/ 1868 if (user->slice_mask & ~device->slice_mask) 1869 return -EINVAL; 1870 1871 if (user->subslice_mask & ~device->subslice_mask[0]) 1872 return -EINVAL; 1873 1874 if (user->max_eus_per_subslice > device->max_eus_per_subslice) 1875 return -EINVAL; 1876 1877 context->slice_mask = user->slice_mask; 1878 context->subslice_mask = user->subslice_mask; 1879 context->min_eus_per_subslice = user->min_eus_per_subslice; 1880 context->max_eus_per_subslice = user->max_eus_per_subslice; 1881 1882 /* Part specific restrictions. */ 1883 if (GRAPHICS_VER(i915) == 11) { 1884 unsigned int hw_s = hweight8(device->slice_mask); 1885 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); 1886 unsigned int req_s = hweight8(context->slice_mask); 1887 unsigned int req_ss = hweight8(context->subslice_mask); 1888 1889 /* 1890 * Only full subslice enablement is possible if more than one 1891 * slice is turned on. 1892 */ 1893 if (req_s > 1 && req_ss != hw_ss_per_s) 1894 return -EINVAL; 1895 1896 /* 1897 * If more than four (SScount bitfield limit) subslices are 1898 * requested then the number has to be even. 1899 */ 1900 if (req_ss > 4 && (req_ss & 1)) 1901 return -EINVAL; 1902 1903 /* 1904 * If only one slice is enabled and subslice count is below the 1905 * device full enablement, it must be at most half of the all 1906 * available subslices. 1907 */ 1908 if (req_s == 1 && req_ss < hw_ss_per_s && 1909 req_ss > (hw_ss_per_s / 2)) 1910 return -EINVAL; 1911 1912 /* ABI restriction - VME use case only. */ 1913 1914 /* All slices or one slice only. */ 1915 if (req_s != 1 && req_s != hw_s) 1916 return -EINVAL; 1917 1918 /* 1919 * Half subslices or full enablement only when one slice is 1920 * enabled. 1921 */ 1922 if (req_s == 1 && 1923 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) 1924 return -EINVAL; 1925 1926 /* No EU configuration changes. */ 1927 if ((user->min_eus_per_subslice != 1928 device->max_eus_per_subslice) || 1929 (user->max_eus_per_subslice != 1930 device->max_eus_per_subslice)) 1931 return -EINVAL; 1932 } 1933 1934 return 0; 1935 } 1936 1937 static int set_sseu(struct i915_gem_context *ctx, 1938 struct drm_i915_gem_context_param *args) 1939 { 1940 struct drm_i915_private *i915 = ctx->i915; 1941 struct drm_i915_gem_context_param_sseu user_sseu; 1942 struct intel_context *ce; 1943 struct intel_sseu sseu; 1944 unsigned long lookup; 1945 int ret; 1946 1947 if (args->size < sizeof(user_sseu)) 1948 return -EINVAL; 1949 1950 if (GRAPHICS_VER(i915) != 11) 1951 return -ENODEV; 1952 1953 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 1954 sizeof(user_sseu))) 1955 return -EFAULT; 1956 1957 if (user_sseu.rsvd) 1958 return -EINVAL; 1959 1960 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 1961 return -EINVAL; 1962 1963 lookup = 0; 1964 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 1965 lookup |= LOOKUP_USER_INDEX; 1966 1967 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 1968 if (IS_ERR(ce)) 1969 return PTR_ERR(ce); 1970 1971 /* Only render engine supports RPCS configuration. 
*/ 1972 if (ce->engine->class != RENDER_CLASS) { 1973 ret = -ENODEV; 1974 goto out_ce; 1975 } 1976 1977 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu); 1978 if (ret) 1979 goto out_ce; 1980 1981 ret = intel_context_reconfigure_sseu(ce, sseu); 1982 if (ret) 1983 goto out_ce; 1984 1985 args->size = sizeof(user_sseu); 1986 1987 out_ce: 1988 intel_context_put(ce); 1989 return ret; 1990 } 1991 1992 static int 1993 set_persistence(struct i915_gem_context *ctx, 1994 const struct drm_i915_gem_context_param *args) 1995 { 1996 if (args->size) 1997 return -EINVAL; 1998 1999 return __context_set_persistence(ctx, args->value); 2000 } 2001 2002 static int set_priority(struct i915_gem_context *ctx, 2003 const struct drm_i915_gem_context_param *args) 2004 { 2005 struct i915_gem_engines_iter it; 2006 struct intel_context *ce; 2007 int err; 2008 2009 err = validate_priority(ctx->i915, args); 2010 if (err) 2011 return err; 2012 2013 ctx->sched.priority = args->value; 2014 2015 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 2016 if (!intel_engine_has_timeslices(ce->engine)) 2017 continue; 2018 2019 if (ctx->sched.priority >= I915_PRIORITY_NORMAL && 2020 intel_engine_has_semaphores(ce->engine)) 2021 intel_context_set_use_semaphores(ce); 2022 else 2023 intel_context_clear_use_semaphores(ce); 2024 } 2025 i915_gem_context_unlock_engines(ctx); 2026 2027 return 0; 2028 } 2029 2030 static int get_protected(struct i915_gem_context *ctx, 2031 struct drm_i915_gem_context_param *args) 2032 { 2033 args->size = 0; 2034 args->value = i915_gem_context_uses_protected_content(ctx); 2035 2036 return 0; 2037 } 2038 2039 static int ctx_setparam(struct drm_i915_file_private *fpriv, 2040 struct i915_gem_context *ctx, 2041 struct drm_i915_gem_context_param *args) 2042 { 2043 int ret = 0; 2044 2045 switch (args->param) { 2046 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 2047 if (args->size) 2048 ret = -EINVAL; 2049 else if (args->value) 2050 i915_gem_context_set_no_error_capture(ctx); 2051 else 2052 i915_gem_context_clear_no_error_capture(ctx); 2053 break; 2054 2055 case I915_CONTEXT_PARAM_BANNABLE: 2056 if (args->size) 2057 ret = -EINVAL; 2058 else if (!capable(CAP_SYS_ADMIN) && !args->value) 2059 ret = -EPERM; 2060 else if (args->value) 2061 i915_gem_context_set_bannable(ctx); 2062 else if (i915_gem_context_uses_protected_content(ctx)) 2063 ret = -EPERM; /* can't clear this for protected contexts */ 2064 else 2065 i915_gem_context_clear_bannable(ctx); 2066 break; 2067 2068 case I915_CONTEXT_PARAM_RECOVERABLE: 2069 if (args->size) 2070 ret = -EINVAL; 2071 else if (!args->value) 2072 i915_gem_context_clear_recoverable(ctx); 2073 else if (i915_gem_context_uses_protected_content(ctx)) 2074 ret = -EPERM; /* can't set this for protected contexts */ 2075 else 2076 i915_gem_context_set_recoverable(ctx); 2077 break; 2078 2079 case I915_CONTEXT_PARAM_PRIORITY: 2080 ret = set_priority(ctx, args); 2081 break; 2082 2083 case I915_CONTEXT_PARAM_SSEU: 2084 ret = set_sseu(ctx, args); 2085 break; 2086 2087 case I915_CONTEXT_PARAM_PERSISTENCE: 2088 ret = set_persistence(ctx, args); 2089 break; 2090 2091 case I915_CONTEXT_PARAM_PROTECTED_CONTENT: 2092 case I915_CONTEXT_PARAM_NO_ZEROMAP: 2093 case I915_CONTEXT_PARAM_BAN_PERIOD: 2094 case I915_CONTEXT_PARAM_RINGSIZE: 2095 case I915_CONTEXT_PARAM_VM: 2096 case I915_CONTEXT_PARAM_ENGINES: 2097 default: 2098 ret = -EINVAL; 2099 break; 2100 } 2101 2102 return ret; 2103 } 2104 2105 struct create_ext { 2106 struct i915_gem_proto_context *pc; 2107 struct 

struct create_ext {
	struct i915_gem_proto_context *pc;
	struct drm_i915_file_private *fpriv;
};

static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	if (local.param.ctx_id)
		return -EINVAL;

	return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
}

static int invalid_ext(struct i915_user_extension __user *ext, void *data)
{
	return -EINVAL;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

static inline struct i915_gem_context *
__context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = xa_load(&file_priv->context_xa, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}

static struct i915_gem_context *
finalize_create_context_locked(struct drm_i915_file_private *file_priv,
			       struct i915_gem_proto_context *pc, u32 id)
{
	struct i915_gem_context *ctx;
	void *old;

	lockdep_assert_held(&file_priv->proto_context_lock);

	ctx = i915_gem_create_context(file_priv->dev_priv, pc);
	if (IS_ERR(ctx))
		return ctx;

	gem_context_register(ctx, file_priv, id);

	old = xa_erase(&file_priv->proto_context_xa, id);
	GEM_BUG_ON(old != pc);
	proto_context_close(file_priv->dev_priv, pc);

	/* One for the xarray and one for the caller */
	return i915_gem_context_get(ctx);
}

struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;

	ctx = __context_lookup(file_priv, id);
	if (ctx)
		return ctx;

	mutex_lock(&file_priv->proto_context_lock);
	/* Try one more time under the lock */
	ctx = __context_lookup(file_priv, id);
	if (!ctx) {
		pc = xa_load(&file_priv->proto_context_xa, id);
		if (!pc)
			ctx = ERR_PTR(-ENOENT);
		else
			ctx = finalize_create_context_locked(file_priv, pc, id);
	}
	mutex_unlock(&file_priv->proto_context_lock);

	return ctx;
}
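
/*
 * Illustrative sketch only: the plain create/destroy round trip served by the
 * two ioctl handlers below, without any extensions (again assuming an open
 * DRM fd and libdrm's drmIoctl()):
 *
 *	struct drm_i915_gem_context_create_ext create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	// ... submit work against create.ctx_id ...
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */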

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(to_gt(i915));
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.pc = proto_context_create(i915, args->flags);
	if (IS_ERR(ext_data.pc))
		return PTR_ERR(ext_data.pc);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_pc;
	}

	if (GRAPHICS_VER(i915) > 12) {
		struct i915_gem_context *ctx;

		/* Get ourselves a context ID */
		ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
			       xa_limit_32b, GFP_KERNEL);
		if (ret)
			goto err_pc;

		ctx = i915_gem_create_context(i915, ext_data.pc);
		if (IS_ERR(ctx)) {
			ret = PTR_ERR(ctx);
			goto err_pc;
		}

		proto_context_close(i915, ext_data.pc);
		gem_context_register(ctx, ext_data.fpriv, id);
	} else {
		ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
		if (ret < 0)
			goto err_pc;
	}

	args->ctx_id = id;
	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);

	return 0;

err_pc:
	proto_context_close(i915, ext_data.pc);
	return ret;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	/* We need to hold the proto-context lock here to prevent races
	 * with finalize_create_context_locked().
	 */
	mutex_lock(&file_priv->proto_context_lock);
	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
	mutex_unlock(&file_priv->proto_context_lock);

	if (!ctx && !pc)
		return -ENOENT;
	GEM_WARN_ON(ctx && pc);

	if (pc)
		proto_context_close(file_priv->dev_priv, pc);

	if (ctx)
		context_close(ctx);

	return 0;
}
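
/*
 * Illustrative sketch only: reading a context parameter back through
 * i915_gem_context_getparam_ioctl() below, e.g. the total GTT size visible
 * to execbuf for this context (assumptions as above: open DRM fd, libdrm
 * drmIoctl(), ctx_id from context creation):
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *	// arg.value now holds the usable GTT size in bytes
 */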

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (args->param) {
	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		vm = i915_gem_context_get_eb_vm(ctx);
		args->value = vm->total;
		i915_vm_put(vm);

		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = get_protected(ctx, args);
		break;

	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_ENGINES:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;
	int ret = 0;

	mutex_lock(&file_priv->proto_context_lock);
	ctx = __context_lookup(file_priv, args->ctx_id);
	if (!ctx) {
		pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
		if (pc) {
			/* Contexts should be finalized inside
			 * GEM_CONTEXT_CREATE starting with graphics
			 * version 13.
			 */
			WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
			ret = set_proto_ctx_param(file_priv, pc, args);
		} else {
			ret = -ENOENT;
		}
	}
	mutex_unlock(&file_priv->proto_context_lock);

	if (ctx) {
		ret = ctx_setparam(file_priv, ctx, args);
		i915_gem_context_put(ctx);
	}

	return ret;
}
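
/*
 * Illustrative sketch only: the robustness query answered by
 * i915_gem_context_reset_stats_ioctl() below, as used for
 * GL_EXT_robustness-style reporting (same assumptions: open DRM fd, libdrm
 * drmIoctl(), ctx_id from context creation):
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
 *	// stats.batch_active: hangs where this context was the guilty party
 *	// stats.batch_pending: hangs where this context had work outstanding
 *	// stats.reset_count: global reset count (privileged callers only)
 */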

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;

	if (args->flags || args->pad)
		return -EINVAL;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	i915_gem_context_put(ctx);
	return 0;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	if (unlikely(!e))
		return NULL;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

void i915_gem_context_module_exit(void)
{
	kmem_cache_destroy(slab_luts);
}

int __init i915_gem_context_module_init(void)
{
	slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!slab_luts)
		return -ENOMEM;

	return 0;
}