/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context in order to trigger a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 * The "current context" means the context which is currently running on the
 * GPU. The GPU has loaded its state already and has stored away the gtt
 * offset of the BO. The GPU is not actively referencing the data at this
 * offset, but it will on the next context switch. The only way to avoid this
 * is to do a GPU reset.
 *
 * An "active context" is one which was previously the "current context" and is
 * on the active list waiting for the next context switch to occur. Until this
 * happens, the object must remain at the same gtt offset. It is therefore
 * possible to destroy a context even though it is still active.
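 *
 * For reference, a minimal userspace sequence driving this code might look
 * roughly like the following (illustrative sketch only; error handling and
 * the execbuf plumbing are omitted):
 *
 *	struct drm_i915_gem_context_create_ext create = { };
 *	struct drm_i915_gem_context_destroy destroy = { };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	... submit batches against create.ctx_id via execbuf ...
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);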
 *
 */

#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/nospec.h>

#include <drm/drm_cache.h>
#include <drm/drm_syncobj.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"

#include "pxp/intel_pxp.h"

#include "i915_file_private.h"
#include "i915_gem_context.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

static struct kmem_cache *slab_luts;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	kmem_cache_free(slab_luts, lut);
}

static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	mutex_lock(&ctx->lut_mutex);
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_lock(&obj->lut_lock);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);

		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			i915_vma_close(vma);
			i915_gem_object_put(obj);
		}

		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
	mutex_unlock(&ctx->lut_mutex);
}

static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}

static int validate_priority(struct drm_i915_private *i915,
			     const struct drm_i915_gem_context_param *args)
{
	s64 priority = args->value;

	if (args->size)
		return -EINVAL;

	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
		return -ENODEV;

	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
		return -EINVAL;

	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	return 0;
}

static void proto_context_close(struct drm_i915_private *i915,
				struct i915_gem_proto_context *pc)
{
	int i;

	if (pc->pxp_wakeref)
		intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
	if (pc->vm)
		i915_vm_put(pc->vm);
	if (pc->user_engines) {
		for (i = 0; i < pc->num_user_engines; i++)
			kfree(pc->user_engines[i].siblings);
		kfree(pc->user_engines);
	}
	kfree(pc);
}

static int proto_context_set_persistence(struct drm_i915_private *i915,
					  struct i915_gem_proto_context *pc,
					  bool persist)
{
	if (persist) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!i915->params.enable_hangcheck)
			return -EINVAL;

		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(i915)))
			return -ENODEV;

		pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
	}

	return 0;
}

static int proto_context_set_protected(struct drm_i915_private *i915,
				       struct i915_gem_proto_context *pc,
				       bool protected)
{
	int ret = 0;

	if (!protected) {
		pc->uses_protected_content = false;
	} else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
		ret = -ENODEV;
	} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
		   !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
		ret = -EPERM;
	} else {
		pc->uses_protected_content = true;

		/*
		 * Protected context usage requires the PXP session to be up,
		 * which in turn requires the device to be active.
		 */
		pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);

		if (!intel_pxp_is_active(&to_gt(i915)->pxp))
			ret = intel_pxp_start(&to_gt(i915)->pxp);
	}

	return ret;
}

static struct i915_gem_proto_context *
proto_context_create(struct drm_i915_private *i915, unsigned int flags)
{
	struct i915_gem_proto_context *pc, *err;

	pc = kzalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return ERR_PTR(-ENOMEM);

	pc->num_user_engines = -1;
	pc->user_engines = NULL;
	pc->user_flags = BIT(UCONTEXT_BANNABLE) |
			 BIT(UCONTEXT_RECOVERABLE);
	if (i915->params.enable_hangcheck)
		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
	pc->sched.priority = I915_PRIORITY_NORMAL;

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
		if (!HAS_EXECLISTS(i915)) {
			err = ERR_PTR(-EINVAL);
			goto proto_close;
		}
		pc->single_timeline = true;
	}

	return pc;

proto_close:
	proto_context_close(i915, pc);
	return err;
}

static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
					 struct i915_gem_proto_context *pc,
					 u32 *id)
{
	int ret;
	void *old;

	lockdep_assert_held(&fpriv->proto_context_lock);

	ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
	if (ret)
		return ret;

	old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
	if (xa_is_err(old)) {
		xa_erase(&fpriv->context_xa, *id);
		return xa_err(old);
	}
	WARN_ON(old);

	return 0;
}

static int proto_context_register(struct drm_i915_file_private *fpriv,
				  struct i915_gem_proto_context *pc,
				  u32 *id)
{
	int ret;

	mutex_lock(&fpriv->proto_context_lock);
	ret = proto_context_register_locked(fpriv, pc, id);
	mutex_unlock(&fpriv->proto_context_lock);

	return ret;
}

static struct i915_address_space *
i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_address_space *vm;

	xa_lock(&file_priv->vm_xa);
	vm = xa_load(&file_priv->vm_xa, id);
	if (vm)
		kref_get(&vm->ref);
	xa_unlock(&file_priv->vm_xa);

	return vm;
}

static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
			    struct i915_gem_proto_context *pc,
			    const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->dev_priv;
	struct i915_address_space *vm;

	if (args->size)
		return -EINVAL;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	if (upper_32_bits(args->value))
		return -ENOENT;

	vm = i915_gem_vm_lookup(fpriv, args->value);
	if (!vm)
		return -ENOENT;

	if (pc->vm)
		i915_vm_put(pc->vm);
	pc->vm = vm;

	return 0;
}

struct set_proto_ctx_engines {
	struct drm_i915_private *i915;
	unsigned num_engines;
	struct i915_gem_proto_engine *engines;
};

static int
set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
			      void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct intel_engine_cs **siblings;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	if (num_siblings == 0)
		return 0;

	siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
	if (!siblings)
		return -ENOMEM;

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto err_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto err_siblings;
		}
	}

	if (num_siblings == 1) {
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
		set->engines[idx].engine = siblings[0];
		kfree(siblings);
	} else {
		set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
		set->engines[idx].num_siblings = num_siblings;
		set->engines[idx].siblings = siblings;
	}

	return 0;

err_siblings:
	kfree(siblings);

	return err;
}

static int
set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
	    !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
		drm_dbg(&i915->drm,
			"Bonding not supported on this platform\n");
		return -ENODEV;
	}

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->num_engines) {
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
			idx, set->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->num_engines);
	if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}

	if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
		drm_dbg(&i915->drm,
			"Bonding with virtual engines not allowed\n");
		return -EINVAL;
	}

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(i915,
					  ci.engine_class,
					  ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (intel_engine_uses_guc(master)) {
		drm_dbg(&i915->drm, "bonding extension not supported with GuC submission");
GuC submission"); 550 return -ENODEV; 551 } 552 553 if (get_user(num_bonds, &ext->num_bonds)) 554 return -EFAULT; 555 556 for (n = 0; n < num_bonds; n++) { 557 struct intel_engine_cs *bond; 558 559 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) 560 return -EFAULT; 561 562 bond = intel_engine_lookup_user(i915, 563 ci.engine_class, 564 ci.engine_instance); 565 if (!bond) { 566 drm_dbg(&i915->drm, 567 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", 568 n, ci.engine_class, ci.engine_instance); 569 return -EINVAL; 570 } 571 } 572 573 return 0; 574 } 575 576 static int 577 set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base, 578 void *data) 579 { 580 struct i915_context_engines_parallel_submit __user *ext = 581 container_of_user(base, typeof(*ext), base); 582 const struct set_proto_ctx_engines *set = data; 583 struct drm_i915_private *i915 = set->i915; 584 struct i915_engine_class_instance prev_engine; 585 u64 flags; 586 int err = 0, n, i, j; 587 u16 slot, width, num_siblings; 588 struct intel_engine_cs **siblings = NULL; 589 intel_engine_mask_t prev_mask; 590 591 if (get_user(slot, &ext->engine_index)) 592 return -EFAULT; 593 594 if (get_user(width, &ext->width)) 595 return -EFAULT; 596 597 if (get_user(num_siblings, &ext->num_siblings)) 598 return -EFAULT; 599 600 if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) && 601 num_siblings != 1) { 602 drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n", 603 num_siblings); 604 return -EINVAL; 605 } 606 607 if (slot >= set->num_engines) { 608 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", 609 slot, set->num_engines); 610 return -EINVAL; 611 } 612 613 if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) { 614 drm_dbg(&i915->drm, 615 "Invalid placement[%d], already occupied\n", slot); 616 return -EINVAL; 617 } 618 619 if (get_user(flags, &ext->flags)) 620 return -EFAULT; 621 622 if (flags) { 623 drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags); 624 return -EINVAL; 625 } 626 627 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { 628 err = check_user_mbz(&ext->mbz64[n]); 629 if (err) 630 return err; 631 } 632 633 if (width < 2) { 634 drm_dbg(&i915->drm, "Width (%d) < 2\n", width); 635 return -EINVAL; 636 } 637 638 if (num_siblings < 1) { 639 drm_dbg(&i915->drm, "Number siblings (%d) < 1\n", 640 num_siblings); 641 return -EINVAL; 642 } 643 644 siblings = kmalloc_array(num_siblings * width, 645 sizeof(*siblings), 646 GFP_KERNEL); 647 if (!siblings) 648 return -ENOMEM; 649 650 /* Create contexts / engines */ 651 for (i = 0; i < width; ++i) { 652 intel_engine_mask_t current_mask = 0; 653 654 for (j = 0; j < num_siblings; ++j) { 655 struct i915_engine_class_instance ci; 656 657 n = i * num_siblings + j; 658 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { 659 err = -EFAULT; 660 goto out_err; 661 } 662 663 siblings[n] = 664 intel_engine_lookup_user(i915, ci.engine_class, 665 ci.engine_instance); 666 if (!siblings[n]) { 667 drm_dbg(&i915->drm, 668 "Invalid sibling[%d]: { class:%d, inst:%d }\n", 669 n, ci.engine_class, ci.engine_instance); 670 err = -EINVAL; 671 goto out_err; 672 } 673 674 /* 675 * We don't support breadcrumb handshake on these 676 * classes 677 */ 678 if (siblings[n]->class == RENDER_CLASS || 679 siblings[n]->class == COMPUTE_CLASS) { 680 err = -EINVAL; 681 goto out_err; 682 } 683 684 if (n) { 685 if (prev_engine.engine_class != 686 ci.engine_class) { 687 drm_dbg(&i915->drm, 688 "Mismatched class %d, %d\n", 689 prev_engine.engine_class, 690 
					err = -EINVAL;
					goto out_err;
				}
			}

			prev_engine = ci;
			current_mask |= siblings[n]->logical_mask;
		}

		if (i > 0) {
			if (current_mask != prev_mask << 1) {
				drm_dbg(&i915->drm,
					"Non contiguous logical mask 0x%x, 0x%x\n",
					prev_mask, current_mask);
				err = -EINVAL;
				goto out_err;
			}
		}
		prev_mask = current_mask;
	}

	set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
	set->engines[slot].num_siblings = num_siblings;
	set->engines[slot].width = width;
	set->engines[slot].siblings = siblings;

	return 0;

out_err:
	kfree(siblings);

	return err;
}

static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
	[I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
		set_proto_ctx_engines_parallel_submit,
};

static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
				 struct i915_gem_proto_context *pc,
				 const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->dev_priv;
	struct set_proto_ctx_engines set = { .i915 = i915 };
	struct i915_context_param_engines __user *user =
		u64_to_user_ptr(args->value);
	unsigned int n;
	u64 extensions;
	int err;

	if (pc->num_user_engines >= 0) {
		drm_dbg(&i915->drm, "Cannot set engines twice");
		return -EINVAL;
	}

	if (args->size < sizeof(*user) ||
	    !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
			args->size);
		return -EINVAL;
	}

	set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
	/* RING_MASK has no shift so we can use it directly here */
	if (set.num_engines > I915_EXEC_RING_MASK + 1)
		return -EINVAL;

	set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
	if (!set.engines)
		return -ENOMEM;

	for (n = 0; n < set.num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			kfree(set.engines);
			return -EFAULT;
		}

		memset(&set.engines[n], 0, sizeof(set.engines[n]));

		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
			continue;

		engine = intel_engine_lookup_user(i915,
						  ci.engine_class,
						  ci.engine_instance);
		if (!engine) {
			drm_dbg(&i915->drm,
				"Invalid engine[%d]: { class:%d, instance:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			kfree(set.engines);
			return -ENOENT;
		}

		set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
		set.engines[n].engine = engine;
	}

	err = -EFAULT;
	if (!get_user(extensions, &user->extensions))
		err = i915_user_extensions(u64_to_user_ptr(extensions),
					   set_proto_ctx_engines_extensions,
					   ARRAY_SIZE(set_proto_ctx_engines_extensions),
					   &set);
	if (err) {
		kfree(set.engines);
		return err;
	}

	pc->num_user_engines = set.num_engines;
	pc->user_engines = set.engines;

	return 0;
}

static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
			      struct i915_gem_proto_context *pc,
			      struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = fpriv->dev_priv;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_sseu *sseu;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (GRAPHICS_VER(i915) != 11)
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
		return -EINVAL;

	if (pc->num_user_engines >= 0) {
		int idx = user_sseu.engine.engine_instance;
		struct i915_gem_proto_engine *pe;

		if (idx >= pc->num_user_engines)
			return -EINVAL;

		pe = &pc->user_engines[idx];

		/* Only render engine supports RPCS configuration. */
		if (pe->engine->class != RENDER_CLASS)
			return -EINVAL;

		sseu = &pe->sseu;
	} else {
		/* Only render engine supports RPCS configuration. */
		if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
			return -EINVAL;

		/* There is only one render engine */
		if (user_sseu.engine.engine_instance != 0)
			return -EINVAL;

		sseu = &pc->legacy_rcs_sseu;
	}

	ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
	if (ret)
		return ret;

	args->size = sizeof(user_sseu);

	return 0;
}

static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
			       struct i915_gem_proto_context *pc,
			       struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
		else
			pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			pc->user_flags |= BIT(UCONTEXT_BANNABLE);
		else if (pc->uses_protected_content)
			ret = -EPERM;
		else
			pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!args->value)
			pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
		else if (pc->uses_protected_content)
			ret = -EPERM;
		else
			pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = validate_priority(fpriv->dev_priv, args);
		if (!ret)
			pc->sched.priority = args->value;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_proto_ctx_sseu(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_proto_ctx_vm(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_proto_ctx_engines(fpriv, pc, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		if (args->size)
			ret = -EINVAL;
		else
			ret = proto_context_set_persistence(fpriv->dev_priv, pc,
							    args->value);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = proto_context_set_protected(fpriv->dev_priv, pc,
						  args->value);
		break;

	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
intel_context_set_gem(struct intel_context *ce,
		      struct i915_gem_context *ctx,
		      struct intel_sseu sseu)
{
	int ret = 0;

	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);

	GEM_BUG_ON(intel_context_is_pinned(ce));
	ce->ring_size = SZ_16K;

	i915_vm_put(ce->vm);
	ce->vm = i915_gem_context_get_eb_vm(ctx);

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_timeslices(ce->engine) &&
	    intel_engine_has_semaphores(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);

	if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
	    ctx->i915->params.request_timeout_ms) {
		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;

		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
	}

	/* A valid SSEU has no zero fields */
	if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
		ret = intel_context_reconfigure_sseu(ce, sseu);

	return ret;
}

static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		struct intel_context *ce = e->engines[count], *child;

		if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
			continue;

		for_each_child(ce, child)
			intel_context_unpin(child);
		intel_context_unpin(ce);
	}
}

static void unpin_engines(struct i915_gem_engines *e)
{
	__unpin_engines(e, e->num_engines);
}

static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		if (!e->engines[count])
			continue;

		intel_context_put(e->engines[count]);
	}
	kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}

static void free_engines_rcu(struct rcu_head *rcu)
{
	struct i915_gem_engines *engines =
		container_of(rcu, struct i915_gem_engines, rcu);

	i915_sw_fence_fini(&engines->fence);
	free_engines(engines);
}

static void accumulate_runtime(struct i915_drm_client *client,
			       struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	if (!client)
		return;

	/* Transfer accumulated runtime to the parent GEM context. */
	for_each_gem_engine(ce, engines, it) {
		unsigned int class = ce->engine->uabi_class;

		GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime));
		atomic64_add(intel_context_get_total_runtime_ns(ce),
			     &client->past_runtime[class]);
	}
}

static int
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_gem_engines *engines =
		container_of(fence, typeof(*engines), fence);
	struct i915_gem_context *ctx = engines->ctx;

	switch (state) {
	case FENCE_COMPLETE:
		if (!list_empty(&engines->link)) {
			unsigned long flags;

			spin_lock_irqsave(&ctx->stale.lock, flags);
			list_del(&engines->link);
			spin_unlock_irqrestore(&ctx->stale.lock, flags);
		}
		accumulate_runtime(ctx->client, engines);
		i915_gem_context_put(ctx);

		break;

	case FENCE_FREE:
		init_rcu_head(&engines->rcu);
		call_rcu(&engines->rcu, free_engines_rcu);
		break;
	}

	return NOTIFY_DONE;
}

static struct i915_gem_engines *alloc_engines(unsigned int count)
{
	struct i915_gem_engines *e;

	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
	if (!e)
		return NULL;

	i915_sw_fence_init(&e->fence, engines_notify);
	return e;
}

static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
						struct intel_sseu rcs_sseu)
{
	const struct intel_gt *gt = to_gt(ctx->i915);
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e, *err;
	enum intel_engine_id id;

	e = alloc_engines(I915_NUM_ENGINES);
	if (!e)
		return ERR_PTR(-ENOMEM);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		struct intel_sseu sseu = {};
		int ret;

		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[engine->legacy_idx] = ce;
		e->num_engines = max(e->num_engines, engine->legacy_idx + 1);

		if (engine->class == RENDER_CLASS)
			sseu = rcs_sseu;

		ret = intel_context_set_gem(ce, ctx, sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}
	}

	return e;

free_engines:
	free_engines(e);
	return err;
}

static int perma_pin_contexts(struct intel_context *ce)
{
	struct intel_context *child;
	int i = 0, j = 0, ret;

	GEM_BUG_ON(!intel_context_is_parent(ce));

	ret = intel_context_pin(ce);
	if (unlikely(ret))
		return ret;

	for_each_child(ce, child) {
		ret = intel_context_pin(child);
		if (unlikely(ret))
			goto unwind;
		++i;
	}

	set_bit(CONTEXT_PERMA_PIN, &ce->flags);

	return 0;

unwind:
	intel_context_unpin(ce);
	for_each_child(ce, child) {
		if (j++ < i)
			intel_context_unpin(child);
		else
			break;
	}

	return ret;
}

static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
					     unsigned int num_engines,
					     struct i915_gem_proto_engine *pe)
{
	struct i915_gem_engines *e, *err;
	unsigned int n;

	e = alloc_engines(num_engines);
	if (!e)
		return ERR_PTR(-ENOMEM);
	e->num_engines = num_engines;
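
	/*
	 * Each proto-engine slot is materialised below as a live
	 * intel_context: PHYSICAL slots map 1:1 onto a hardware engine,
	 * BALANCED slots become a virtual engine spanning their siblings,
	 * and PARALLEL slots become a multi-width parallel engine (which
	 * is additionally perma-pinned further down).
	 */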
	for (n = 0; n < num_engines; n++) {
		struct intel_context *ce, *child;
		int ret;

		switch (pe[n].type) {
		case I915_GEM_ENGINE_TYPE_PHYSICAL:
			ce = intel_context_create(pe[n].engine);
			break;

		case I915_GEM_ENGINE_TYPE_BALANCED:
			ce = intel_engine_create_virtual(pe[n].siblings,
							 pe[n].num_siblings, 0);
			break;

		case I915_GEM_ENGINE_TYPE_PARALLEL:
			ce = intel_engine_create_parallel(pe[n].siblings,
							  pe[n].num_siblings,
							  pe[n].width);
			break;

		case I915_GEM_ENGINE_TYPE_INVALID:
		default:
			GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
			continue;
		}

		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[n] = ce;

		ret = intel_context_set_gem(ce, ctx, pe->sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}
		for_each_child(ce, child) {
			ret = intel_context_set_gem(child, ctx, pe->sseu);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}

		/*
		 * XXX: Must be done after calling intel_context_set_gem as that
		 * function changes the ring size. The ring is allocated when
		 * the context is pinned. If the ring size is changed after
		 * allocation, the mismatch will cause the context to hang.
		 * Presumably with a bit of reordering we could move the
		 * perma-pin step to the backend function
		 * intel_engine_create_parallel.
		 */
		if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
			ret = perma_pin_contexts(ce);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}
	}

	return e;

free_engines:
	free_engines(e);
	return err;
}

static void i915_gem_context_release_work(struct work_struct *work)
{
	struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
						    release_work);
	struct i915_address_space *vm;

	trace_i915_context_free(ctx);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	if (ctx->syncobj)
		drm_syncobj_put(ctx->syncobj);

	vm = ctx->vm;
	if (vm)
		i915_vm_put(vm);

	if (ctx->pxp_wakeref)
		intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);

	if (ctx->client)
		i915_drm_client_put(ctx->client);

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);

	kfree_rcu(ctx, rcu);
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);

	queue_work(ctx->i915->wq, &ctx->release_work);
}

static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines, true);
}

static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}

static bool __cancel_engine(struct intel_engine_cs *engine)
{
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset). As we have marked our context
	 * as banned, any incomplete request, including any running, will
	 * be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fall back to doing a local reset
	 * instead.
	 */
	return intel_engine_pulse(engine) == 0;
}

static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	if (intel_context_has_inflight(ce))
		return intel_context_inflight(ce);

	if (!ce->timeline)
		return NULL;

	/*
	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
	 * to the request to prevent it being transferred to a new timeline
	 * (and onto a new timeline->requests list).
	 */
	rcu_read_lock();
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		bool found;

		/* timeline is already completed up to this point? */
		if (!i915_request_get_rcu(rq))
			break;

		/* Check with the backend if the request is inflight */
		found = true;
		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
			found = i915_request_active_engine(rq, &engine);

		i915_request_put(rq);
		if (found)
			break;
	}
	rcu_read_unlock();

	return engine;
}

static void
kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
	for_each_gem_engine(ce, engines, it) {
		struct intel_engine_cs *engine;

		if ((exit || !persistent) && intel_context_revoke(ce))
			continue; /* Already marked. */

		/*
		 * Check the current active state of this context; if we
		 * are currently executing on the GPU we need to evict
		 * ourselves. On the other hand, if we haven't yet been
		 * submitted to the GPU or if everything is complete,
		 * we have nothing to do.
		 */
		engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
		if (engine && !__cancel_engine(engine) && (exit || !persistent))
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a full
			 * reset. We hope the collateral damage is worth it.
1409 */ 1410 __reset_context(engines->ctx, engine); 1411 } 1412 } 1413 1414 static void kill_context(struct i915_gem_context *ctx) 1415 { 1416 struct i915_gem_engines *pos, *next; 1417 1418 spin_lock_irq(&ctx->stale.lock); 1419 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 1420 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) { 1421 if (!i915_sw_fence_await(&pos->fence)) { 1422 list_del_init(&pos->link); 1423 continue; 1424 } 1425 1426 spin_unlock_irq(&ctx->stale.lock); 1427 1428 kill_engines(pos, !ctx->i915->params.enable_hangcheck, 1429 i915_gem_context_is_persistent(ctx)); 1430 1431 spin_lock_irq(&ctx->stale.lock); 1432 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); 1433 list_safe_reset_next(pos, next, link); 1434 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */ 1435 1436 i915_sw_fence_complete(&pos->fence); 1437 } 1438 spin_unlock_irq(&ctx->stale.lock); 1439 } 1440 1441 static void engines_idle_release(struct i915_gem_context *ctx, 1442 struct i915_gem_engines *engines) 1443 { 1444 struct i915_gem_engines_iter it; 1445 struct intel_context *ce; 1446 1447 INIT_LIST_HEAD(&engines->link); 1448 1449 engines->ctx = i915_gem_context_get(ctx); 1450 1451 for_each_gem_engine(ce, engines, it) { 1452 int err; 1453 1454 /* serialises with execbuf */ 1455 intel_context_close(ce); 1456 if (!intel_context_pin_if_active(ce)) 1457 continue; 1458 1459 /* Wait until context is finally scheduled out and retired */ 1460 err = i915_sw_fence_await_active(&engines->fence, 1461 &ce->active, 1462 I915_ACTIVE_AWAIT_BARRIER); 1463 intel_context_unpin(ce); 1464 if (err) 1465 goto kill; 1466 } 1467 1468 spin_lock_irq(&ctx->stale.lock); 1469 if (!i915_gem_context_is_closed(ctx)) 1470 list_add_tail(&engines->link, &ctx->stale.engines); 1471 spin_unlock_irq(&ctx->stale.lock); 1472 1473 kill: 1474 if (list_empty(&engines->link)) /* raced, already closed */ 1475 kill_engines(engines, true, 1476 i915_gem_context_is_persistent(ctx)); 1477 1478 i915_sw_fence_commit(&engines->fence); 1479 } 1480 1481 static void set_closed_name(struct i915_gem_context *ctx) 1482 { 1483 char *s; 1484 1485 /* Replace '[]' with '<>' to indicate closed in debug prints */ 1486 1487 s = strrchr(ctx->name, '['); 1488 if (!s) 1489 return; 1490 1491 *s = '<'; 1492 1493 s = strchr(s + 1, ']'); 1494 if (s) 1495 *s = '>'; 1496 } 1497 1498 static void context_close(struct i915_gem_context *ctx) 1499 { 1500 struct i915_drm_client *client; 1501 1502 /* Flush any concurrent set_engines() */ 1503 mutex_lock(&ctx->engines_mutex); 1504 unpin_engines(__context_engines_static(ctx)); 1505 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); 1506 i915_gem_context_set_closed(ctx); 1507 mutex_unlock(&ctx->engines_mutex); 1508 1509 mutex_lock(&ctx->mutex); 1510 1511 set_closed_name(ctx); 1512 1513 /* 1514 * The LUT uses the VMA as a backpointer to unref the object, 1515 * so we need to clear the LUT before we close all the VMA (inside 1516 * the ppgtt). 1517 */ 1518 lut_close(ctx); 1519 1520 ctx->file_priv = ERR_PTR(-EBADF); 1521 1522 client = ctx->client; 1523 if (client) { 1524 spin_lock(&client->ctx_lock); 1525 list_del_rcu(&ctx->client_link); 1526 spin_unlock(&client->ctx_lock); 1527 } 1528 1529 mutex_unlock(&ctx->mutex); 1530 1531 /* 1532 * If the user has disabled hangchecking, we can not be sure that 1533 * the batches will ever complete after the context is closed, 1534 * keeping the context and all resources pinned forever. 
	 * case we opt to forcibly kill off all remaining requests on
	 * context close.
	 */
	kill_context(ctx);

	i915_gem_context_put(ctx);
}

static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
{
	if (i915_gem_context_is_persistent(ctx) == state)
		return 0;

	if (state) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!ctx->i915->params.enable_hangcheck)
			return -EINVAL;

		i915_gem_context_set_persistence(ctx);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(ctx->i915)))
			return -ENODEV;

		i915_gem_context_clear_persistence(ctx);
	}

	return 0;
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *i915,
			const struct i915_gem_proto_context *pc)
{
	struct i915_gem_context *ctx;
	struct i915_address_space *vm = NULL;
	struct i915_gem_engines *e;
	int err;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->i915 = i915;
	ctx->sched = pc->sched;
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->link);
	INIT_WORK(&ctx->release_work, i915_gem_context_release_work);

	spin_lock_init(&ctx->stale.lock);
	INIT_LIST_HEAD(&ctx->stale.engines);

	if (pc->vm) {
		vm = i915_vm_get(pc->vm);
	} else if (HAS_FULL_PPGTT(i915)) {
		struct i915_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(to_gt(i915), 0);
		if (IS_ERR(ppgtt)) {
			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
				PTR_ERR(ppgtt));
			err = PTR_ERR(ppgtt);
			goto err_ctx;
		}
		vm = &ppgtt->vm;
	}
	if (vm)
		ctx->vm = vm;

	mutex_init(&ctx->engines_mutex);
	if (pc->num_user_engines >= 0) {
		i915_gem_context_set_user_engines(ctx);
		e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
	} else {
		i915_gem_context_clear_user_engines(ctx);
		e = default_engines(ctx, pc->legacy_rcs_sseu);
	}
	if (IS_ERR(e)) {
		err = PTR_ERR(e);
		goto err_vm;
	}
	RCU_INIT_POINTER(ctx->engines, e);

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	mutex_init(&ctx->lut_mutex);

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP.
	 */
	ctx->remap_slice = ALL_L3_SLICES(i915);

	ctx->user_flags = pc->user_flags;

	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

	if (pc->single_timeline) {
		err = drm_syncobj_create(&ctx->syncobj,
					 DRM_SYNCOBJ_CREATE_SIGNALED,
					 NULL);
		if (err)
			goto err_engines;
	}

	if (pc->uses_protected_content) {
		ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
		ctx->uses_protected_content = true;
	}

	trace_i915_context_create(ctx);

	return ctx;

err_engines:
	free_engines(e);
err_vm:
	if (ctx->vm)
		i915_vm_put(ctx->vm);
err_ctx:
	kfree(ctx);
	return ERR_PTR(err);
}

static void init_contexts(struct i915_gem_contexts *gc)
{
	spin_lock_init(&gc->lock);
	INIT_LIST_HEAD(&gc->list);
}

void i915_gem_init__contexts(struct drm_i915_private *i915)
{
	init_contexts(&i915->gem.contexts);
}

static void gem_context_register(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *fpriv,
				 u32 id)
{
	struct drm_i915_private *i915 = ctx->i915;
	void *old;

	ctx->file_priv = fpriv;

	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	ctx->client = i915_drm_client_get(fpriv->client);

	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
		 current->comm, pid_nr(ctx->pid));

	/* And finally expose ourselves to userspace via the idr */
	old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
	WARN_ON(old);

	spin_lock(&ctx->client->ctx_lock);
	list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
	spin_unlock(&ctx->client->ctx_lock);

	spin_lock(&i915->gem.contexts.lock);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	spin_unlock(&i915->gem.contexts.lock);
}

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;
	int err;

	mutex_init(&file_priv->proto_context_lock);
	xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);

	/* 0 reserved for the default context */
	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);

	/* 0 reserved for invalid/unassigned ppgtt */
	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);

	pc = proto_context_create(i915, 0);
	if (IS_ERR(pc)) {
		err = PTR_ERR(pc);
		goto err;
	}

	ctx = i915_gem_create_context(i915, pc);
	proto_context_close(i915, pc);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err;
	}

	gem_context_register(ctx, file_priv, 0);

	return 0;

err:
	xa_destroy(&file_priv->vm_xa);
	xa_destroy(&file_priv->context_xa);
	xa_destroy(&file_priv->proto_context_xa);
	mutex_destroy(&file_priv->proto_context_lock);
	return err;
}

void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long idx;

	xa_for_each(&file_priv->proto_context_xa, idx, pc)
		proto_context_close(file_priv->dev_priv, pc);
	xa_destroy(&file_priv->proto_context_xa);
	mutex_destroy(&file_priv->proto_context_lock);

	xa_for_each(&file_priv->context_xa, idx, ctx)
		context_close(ctx);
	xa_destroy(&file_priv->context_xa);

	xa_for_each(&file_priv->vm_xa, idx, vm)
		i915_vm_put(vm);
	xa_destroy(&file_priv->vm_xa);
}

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_vm_control *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_ppgtt *ppgtt;
	u32 id;
	int err;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	if (args->flags)
		return -EINVAL;

	ppgtt = i915_ppgtt_create(to_gt(i915), 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (args->extensions) {
		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   NULL, 0,
					   ppgtt);
		if (err)
			goto err_put;
	}

	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
		       xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_put;

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->vm_id = id;
	return 0;

err_put:
	i915_vm_put(&ppgtt->vm);
	return err;
}

int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_vm_control *args = data;
	struct i915_address_space *vm;

	if (args->flags)
		return -EINVAL;

	if (args->extensions)
		return -EINVAL;

	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
	if (!vm)
		return -ENOENT;

	i915_vm_put(vm);
	return 0;
}

static int get_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm;
	int err;
	u32 id;

	if (!i915_gem_context_has_full_ppgtt(ctx))
		return -ENODEV;

	vm = ctx->vm;
	GEM_BUG_ON(!vm);

	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
	if (err)
		return err;

	i915_vm_get(vm);

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->value = id;
	args->size = 0;

	return err;
}

int
i915_gem_user_to_context_sseu(struct intel_gt *gt,
			      const struct drm_i915_gem_context_param_sseu *user,
			      struct intel_sseu *context)
{
	const struct sseu_dev_info *device = &gt->info.sseu;
	struct drm_i915_private *i915 = gt->i915;
	unsigned int dev_subslice_mask = intel_sseu_get_hsw_subslices(device, 0);

	/* No zeros in any field. */
	if (!user->slice_mask || !user->subslice_mask ||
	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
		return -EINVAL;

	/* Max > min. */
	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
		return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
1894 */ 1895 if (overflows_type(user->slice_mask, context->slice_mask) || 1896 overflows_type(user->subslice_mask, context->subslice_mask) || 1897 overflows_type(user->min_eus_per_subslice, 1898 context->min_eus_per_subslice) || 1899 overflows_type(user->max_eus_per_subslice, 1900 context->max_eus_per_subslice)) 1901 return -EINVAL; 1902 1903 /* Check validity against hardware. */ 1904 if (user->slice_mask & ~device->slice_mask) 1905 return -EINVAL; 1906 1907 if (user->subslice_mask & ~dev_subslice_mask) 1908 return -EINVAL; 1909 1910 if (user->max_eus_per_subslice > device->max_eus_per_subslice) 1911 return -EINVAL; 1912 1913 context->slice_mask = user->slice_mask; 1914 context->subslice_mask = user->subslice_mask; 1915 context->min_eus_per_subslice = user->min_eus_per_subslice; 1916 context->max_eus_per_subslice = user->max_eus_per_subslice; 1917 1918 /* Part specific restrictions. */ 1919 if (GRAPHICS_VER(i915) == 11) { 1920 unsigned int hw_s = hweight8(device->slice_mask); 1921 unsigned int hw_ss_per_s = hweight8(dev_subslice_mask); 1922 unsigned int req_s = hweight8(context->slice_mask); 1923 unsigned int req_ss = hweight8(context->subslice_mask); 1924 1925 /* 1926 * Only full subslice enablement is possible if more than one 1927 * slice is turned on. 1928 */ 1929 if (req_s > 1 && req_ss != hw_ss_per_s) 1930 return -EINVAL; 1931 1932 /* 1933 * If more than four (SScount bitfield limit) subslices are 1934 * requested then the number has to be even. 1935 */ 1936 if (req_ss > 4 && (req_ss & 1)) 1937 return -EINVAL; 1938 1939 /* 1940 * If only one slice is enabled and subslice count is below the 1941 * device full enablement, it must be at most half of the all 1942 * available subslices. 1943 */ 1944 if (req_s == 1 && req_ss < hw_ss_per_s && 1945 req_ss > (hw_ss_per_s / 2)) 1946 return -EINVAL; 1947 1948 /* ABI restriction - VME use case only. */ 1949 1950 /* All slices or one slice only. */ 1951 if (req_s != 1 && req_s != hw_s) 1952 return -EINVAL; 1953 1954 /* 1955 * Half subslices or full enablement only when one slice is 1956 * enabled. 1957 */ 1958 if (req_s == 1 && 1959 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) 1960 return -EINVAL; 1961 1962 /* No EU configuration changes. */ 1963 if ((user->min_eus_per_subslice != 1964 device->max_eus_per_subslice) || 1965 (user->max_eus_per_subslice != 1966 device->max_eus_per_subslice)) 1967 return -EINVAL; 1968 } 1969 1970 return 0; 1971 } 1972 1973 static int set_sseu(struct i915_gem_context *ctx, 1974 struct drm_i915_gem_context_param *args) 1975 { 1976 struct drm_i915_private *i915 = ctx->i915; 1977 struct drm_i915_gem_context_param_sseu user_sseu; 1978 struct intel_context *ce; 1979 struct intel_sseu sseu; 1980 unsigned long lookup; 1981 int ret; 1982 1983 if (args->size < sizeof(user_sseu)) 1984 return -EINVAL; 1985 1986 if (GRAPHICS_VER(i915) != 11) 1987 return -ENODEV; 1988 1989 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 1990 sizeof(user_sseu))) 1991 return -EFAULT; 1992 1993 if (user_sseu.rsvd) 1994 return -EINVAL; 1995 1996 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 1997 return -EINVAL; 1998 1999 lookup = 0; 2000 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 2001 lookup |= LOOKUP_USER_INDEX; 2002 2003 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 2004 if (IS_ERR(ce)) 2005 return PTR_ERR(ce); 2006 2007 /* Only render engine supports RPCS configuration. 
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}

static int
set_persistence(struct i915_gem_context *ctx,
		const struct drm_i915_gem_context_param *args)
{
	if (args->size)
		return -EINVAL;

	return __context_set_persistence(ctx, args->value);
}

static int set_priority(struct i915_gem_context *ctx,
			const struct drm_i915_gem_context_param *args)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err;

	err = validate_priority(ctx->i915, args);
	if (err)
		return err;

	ctx->sched.priority = args->value;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_has_timeslices(ce->engine))
			continue;

		if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
		    intel_engine_has_semaphores(ce->engine))
			intel_context_set_use_semaphores(ce);
		else
			intel_context_clear_use_semaphores(ce);
	}
	i915_gem_context_unlock_engines(ctx);

	return 0;
}

static int get_protected(struct i915_gem_context *ctx,
			 struct drm_i915_gem_context_param *args)
{
	args->size = 0;
	args->value = i915_gem_context_uses_protected_content(ctx);

	return 0;
}

static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else if (i915_gem_context_uses_protected_content(ctx))
			ret = -EPERM; /* can't clear this for protected contexts */
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!args->value)
			i915_gem_context_clear_recoverable(ctx);
		else if (i915_gem_context_uses_protected_content(ctx))
			ret = -EPERM; /* can't set this for protected contexts */
		else
			i915_gem_context_set_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = set_priority(ctx, args);
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		ret = set_persistence(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	case I915_CONTEXT_PARAM_VM:
	case I915_CONTEXT_PARAM_ENGINES:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

struct create_ext {
	struct i915_gem_proto_context *pc;
	struct drm_i915_file_private *fpriv;
struct create_ext {
	struct i915_gem_proto_context *pc;
	struct drm_i915_file_private *fpriv;
};

static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	if (local.param.ctx_id)
		return -EINVAL;

	return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
}

static int invalid_ext(struct i915_user_extension __user *ext, void *data)
{
	return -EINVAL;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

static inline struct i915_gem_context *
__context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = xa_load(&file_priv->context_xa, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}

static struct i915_gem_context *
finalize_create_context_locked(struct drm_i915_file_private *file_priv,
			       struct i915_gem_proto_context *pc, u32 id)
{
	struct i915_gem_context *ctx;
	void *old;

	lockdep_assert_held(&file_priv->proto_context_lock);

	ctx = i915_gem_create_context(file_priv->dev_priv, pc);
	if (IS_ERR(ctx))
		return ctx;

	gem_context_register(ctx, file_priv, id);

	old = xa_erase(&file_priv->proto_context_xa, id);
	GEM_BUG_ON(old != pc);
	proto_context_close(file_priv->dev_priv, pc);

	/* One for the xarray and one for the caller */
	return i915_gem_context_get(ctx);
}

struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;

	ctx = __context_lookup(file_priv, id);
	if (ctx)
		return ctx;

	mutex_lock(&file_priv->proto_context_lock);
	/* Try one more time under the lock */
	ctx = __context_lookup(file_priv, id);
	if (!ctx) {
		pc = xa_load(&file_priv->proto_context_xa, id);
		if (!pc)
			ctx = ERR_PTR(-ENOENT);
		else
			ctx = finalize_create_context_locked(file_priv, pc, id);
	}
	mutex_unlock(&file_priv->proto_context_lock);

	return ctx;
}
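/*
 * Illustrative only: a userspace sketch (not part of the driver) of the
 * create ioctl below chaining a SETPARAM extension, so parameters are
 * applied while the context is still a proto-context (see create_setparam()
 * above). drm_fd is a placeholder file descriptor.
 *
 *	struct drm_i915_gem_context_create_ext_setparam p = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
 *			.value = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p,
 *	};
 *
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	// on success, create.ctx_id holds the new context handle
 */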
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(to_gt(i915));
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.pc = proto_context_create(i915, args->flags);
	if (IS_ERR(ext_data.pc))
		return PTR_ERR(ext_data.pc);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_pc;
	}

	if (GRAPHICS_VER(i915) > 12) {
		struct i915_gem_context *ctx;

		/* Get ourselves a context ID */
		ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
			       xa_limit_32b, GFP_KERNEL);
		if (ret)
			goto err_pc;

		ctx = i915_gem_create_context(i915, ext_data.pc);
		if (IS_ERR(ctx)) {
			ret = PTR_ERR(ctx);
			goto err_pc;
		}

		proto_context_close(i915, ext_data.pc);
		gem_context_register(ctx, ext_data.fpriv, id);
	} else {
		ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
		if (ret < 0)
			goto err_pc;
	}

	args->ctx_id = id;

	return 0;

err_pc:
	proto_context_close(i915, ext_data.pc);
	return ret;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	/* We need to hold the proto-context lock here to prevent races
	 * with finalize_create_context_locked().
	 */
	mutex_lock(&file_priv->proto_context_lock);
	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
	mutex_unlock(&file_priv->proto_context_lock);

	if (!ctx && !pc)
		return -ENOENT;
	GEM_WARN_ON(ctx && pc);

	if (pc)
		proto_context_close(file_priv->dev_priv, pc);

	if (ctx)
		context_close(ctx);

	return 0;
}

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}
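/*
 * Illustrative only: a userspace sketch (not part of the driver) of the
 * GETPARAM ioctl implemented below, here querying the context's usable GTT
 * size. For scalar parameters the kernel returns the result in .value and
 * sets .size to 0. ctx_id and drm_fd are placeholders.
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg) == 0)
 *		gtt_size = arg.value;
 */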
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (args->param) {
	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		vm = i915_gem_context_get_eb_vm(ctx);
		args->value = vm->total;
		i915_vm_put(vm);

		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = get_protected(ctx, args);
		break;

	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_ENGINES:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_proto_context *pc;
	struct i915_gem_context *ctx;
	int ret = 0;

	mutex_lock(&file_priv->proto_context_lock);
	ctx = __context_lookup(file_priv, args->ctx_id);
	if (!ctx) {
		pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
		if (pc) {
			/* Contexts should be finalized inside
			 * GEM_CONTEXT_CREATE starting with graphics
			 * version 13.
			 */
			WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
			ret = set_proto_ctx_param(file_priv, pc, args);
		} else {
			ret = -ENOENT;
		}
	}
	mutex_unlock(&file_priv->proto_context_lock);

	if (ctx) {
		ret = ctx_setparam(file_priv, ctx, args);
		i915_gem_context_put(ctx);
	}

	return ret;
}
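/*
 * Illustrative only: a userspace sketch (not part of the driver) of the
 * reset-stats query handled below, which lets a client observe GPU hangs
 * that involved its context. ctx_id and drm_fd are placeholders.
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0) {
 *		// stats.batch_active: hangs in which this context was guilty
 *		// stats.batch_pending: hangs in which it was merely active
 *	}
 */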
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;

	if (args->flags || args->pad)
		return -EINVAL;

	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of
	 * protection, we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	i915_gem_context_put(ctx);
	return 0;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	if (unlikely(!e))
		return NULL;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

void i915_gem_context_module_exit(void)
{
	kmem_cache_destroy(slab_luts);
}

int __init i915_gem_context_module_init(void)
{
	slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!slab_luts)
		return -ENOMEM;

	return 0;
}