/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU
 * enters and exits RC6 (the GPU has its own internal power context, except
 * on gen5). Though something like a context does exist for the media ring,
 * the code only supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current context, to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                            refcount   pincount   active
 * S0: initial state                              0          0        0
 * S1: context created                            1          0        0
 * S2: context is currently running               2          1        X
 * S3: GPU referenced, but not current            2          0        1
 * S4: context is current, but destroyed          1          1        0
 * S5: like S3, but destroyed                     1          0        1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with the context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context, but it is still active.
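 *
 * As a rough, hedged sketch only (error handling, buffer setup and the exact
 * ioctl wrappers are omitted; the structures, ioctls and the
 * i915_execbuffer2_set_context_id() helper are the uAPI ones from
 * include/uapi/drm/i915_drm.h, while "fd" is assumed to be an open DRM file),
 * a client walks the common transitions above with something like:
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);      // S0->S1
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);        // S1->S2
 *
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);    // towards S5->S0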
64 * 65 */ 66 67 #include <linux/log2.h> 68 #include <linux/nospec.h> 69 70 #include <drm/drm_syncobj.h> 71 72 #include "gt/gen6_ppgtt.h" 73 #include "gt/intel_context.h" 74 #include "gt/intel_context_param.h" 75 #include "gt/intel_engine_heartbeat.h" 76 #include "gt/intel_engine_user.h" 77 #include "gt/intel_gpu_commands.h" 78 #include "gt/intel_ring.h" 79 80 #include "i915_gem_context.h" 81 #include "i915_trace.h" 82 #include "i915_user_extensions.h" 83 84 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 85 86 static struct kmem_cache *slab_luts; 87 88 struct i915_lut_handle *i915_lut_handle_alloc(void) 89 { 90 return kmem_cache_alloc(slab_luts, GFP_KERNEL); 91 } 92 93 void i915_lut_handle_free(struct i915_lut_handle *lut) 94 { 95 return kmem_cache_free(slab_luts, lut); 96 } 97 98 static void lut_close(struct i915_gem_context *ctx) 99 { 100 struct radix_tree_iter iter; 101 void __rcu **slot; 102 103 mutex_lock(&ctx->lut_mutex); 104 rcu_read_lock(); 105 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { 106 struct i915_vma *vma = rcu_dereference_raw(*slot); 107 struct drm_i915_gem_object *obj = vma->obj; 108 struct i915_lut_handle *lut; 109 110 if (!kref_get_unless_zero(&obj->base.refcount)) 111 continue; 112 113 spin_lock(&obj->lut_lock); 114 list_for_each_entry(lut, &obj->lut_list, obj_link) { 115 if (lut->ctx != ctx) 116 continue; 117 118 if (lut->handle != iter.index) 119 continue; 120 121 list_del(&lut->obj_link); 122 break; 123 } 124 spin_unlock(&obj->lut_lock); 125 126 if (&lut->obj_link != &obj->lut_list) { 127 i915_lut_handle_free(lut); 128 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); 129 i915_vma_close(vma); 130 i915_gem_object_put(obj); 131 } 132 133 i915_gem_object_put(obj); 134 } 135 rcu_read_unlock(); 136 mutex_unlock(&ctx->lut_mutex); 137 } 138 139 static struct intel_context * 140 lookup_user_engine(struct i915_gem_context *ctx, 141 unsigned long flags, 142 const struct i915_engine_class_instance *ci) 143 #define LOOKUP_USER_INDEX BIT(0) 144 { 145 int idx; 146 147 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx)) 148 return ERR_PTR(-EINVAL); 149 150 if (!i915_gem_context_user_engines(ctx)) { 151 struct intel_engine_cs *engine; 152 153 engine = intel_engine_lookup_user(ctx->i915, 154 ci->engine_class, 155 ci->engine_instance); 156 if (!engine) 157 return ERR_PTR(-EINVAL); 158 159 idx = engine->legacy_idx; 160 } else { 161 idx = ci->engine_instance; 162 } 163 164 return i915_gem_context_get_engine(ctx, idx); 165 } 166 167 static int validate_priority(struct drm_i915_private *i915, 168 const struct drm_i915_gem_context_param *args) 169 { 170 s64 priority = args->value; 171 172 if (args->size) 173 return -EINVAL; 174 175 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) 176 return -ENODEV; 177 178 if (priority > I915_CONTEXT_MAX_USER_PRIORITY || 179 priority < I915_CONTEXT_MIN_USER_PRIORITY) 180 return -EINVAL; 181 182 if (priority > I915_CONTEXT_DEFAULT_PRIORITY && 183 !capable(CAP_SYS_NICE)) 184 return -EPERM; 185 186 return 0; 187 } 188 189 static void proto_context_close(struct i915_gem_proto_context *pc) 190 { 191 int i; 192 193 if (pc->vm) 194 i915_vm_put(pc->vm); 195 if (pc->user_engines) { 196 for (i = 0; i < pc->num_user_engines; i++) 197 kfree(pc->user_engines[i].siblings); 198 kfree(pc->user_engines); 199 } 200 kfree(pc); 201 } 202 203 static int proto_context_set_persistence(struct drm_i915_private *i915, 204 struct i915_gem_proto_context *pc, 205 bool persist) 206 { 207 if (persist) { 208 /* 209 * Only 
contexts that are short-lived [that will expire or be 210 * reset] are allowed to survive past termination. We require 211 * hangcheck to ensure that the persistent requests are healthy. 212 */ 213 if (!i915->params.enable_hangcheck) 214 return -EINVAL; 215 216 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); 217 } else { 218 /* To cancel a context we use "preempt-to-idle" */ 219 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) 220 return -ENODEV; 221 222 /* 223 * If the cancel fails, we then need to reset, cleanly! 224 * 225 * If the per-engine reset fails, all hope is lost! We resort 226 * to a full GPU reset in that unlikely case, but realistically 227 * if the engine could not reset, the full reset does not fare 228 * much better. The damage has been done. 229 * 230 * However, if we cannot reset an engine by itself, we cannot 231 * cleanup a hanging persistent context without causing 232 * colateral damage, and we should not pretend we can by 233 * exposing the interface. 234 */ 235 if (!intel_has_reset_engine(&i915->gt)) 236 return -ENODEV; 237 238 pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE); 239 } 240 241 return 0; 242 } 243 244 static struct i915_gem_proto_context * 245 proto_context_create(struct drm_i915_private *i915, unsigned int flags) 246 { 247 struct i915_gem_proto_context *pc, *err; 248 249 pc = kzalloc(sizeof(*pc), GFP_KERNEL); 250 if (!pc) 251 return ERR_PTR(-ENOMEM); 252 253 pc->num_user_engines = -1; 254 pc->user_engines = NULL; 255 pc->user_flags = BIT(UCONTEXT_BANNABLE) | 256 BIT(UCONTEXT_RECOVERABLE); 257 if (i915->params.enable_hangcheck) 258 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); 259 pc->sched.priority = I915_PRIORITY_NORMAL; 260 261 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { 262 if (!HAS_EXECLISTS(i915)) { 263 err = ERR_PTR(-EINVAL); 264 goto proto_close; 265 } 266 pc->single_timeline = true; 267 } 268 269 return pc; 270 271 proto_close: 272 proto_context_close(pc); 273 return err; 274 } 275 276 static int proto_context_register_locked(struct drm_i915_file_private *fpriv, 277 struct i915_gem_proto_context *pc, 278 u32 *id) 279 { 280 int ret; 281 void *old; 282 283 lockdep_assert_held(&fpriv->proto_context_lock); 284 285 ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL); 286 if (ret) 287 return ret; 288 289 old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL); 290 if (xa_is_err(old)) { 291 xa_erase(&fpriv->context_xa, *id); 292 return xa_err(old); 293 } 294 WARN_ON(old); 295 296 return 0; 297 } 298 299 static int proto_context_register(struct drm_i915_file_private *fpriv, 300 struct i915_gem_proto_context *pc, 301 u32 *id) 302 { 303 int ret; 304 305 mutex_lock(&fpriv->proto_context_lock); 306 ret = proto_context_register_locked(fpriv, pc, id); 307 mutex_unlock(&fpriv->proto_context_lock); 308 309 return ret; 310 } 311 312 static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv, 313 struct i915_gem_proto_context *pc, 314 const struct drm_i915_gem_context_param *args) 315 { 316 struct drm_i915_private *i915 = fpriv->dev_priv; 317 struct i915_address_space *vm; 318 319 if (args->size) 320 return -EINVAL; 321 322 if (!HAS_FULL_PPGTT(i915)) 323 return -ENODEV; 324 325 if (upper_32_bits(args->value)) 326 return -ENOENT; 327 328 vm = i915_gem_vm_lookup(fpriv, args->value); 329 if (!vm) 330 return -ENOENT; 331 332 if (pc->vm) 333 i915_vm_put(pc->vm); 334 pc->vm = vm; 335 336 return 0; 337 } 338 339 struct set_proto_ctx_engines { 340 struct drm_i915_private *i915; 341 unsigned num_engines; 342 struct 
i915_gem_proto_engine *engines; 343 }; 344 345 static int 346 set_proto_ctx_engines_balance(struct i915_user_extension __user *base, 347 void *data) 348 { 349 struct i915_context_engines_load_balance __user *ext = 350 container_of_user(base, typeof(*ext), base); 351 const struct set_proto_ctx_engines *set = data; 352 struct drm_i915_private *i915 = set->i915; 353 struct intel_engine_cs **siblings; 354 u16 num_siblings, idx; 355 unsigned int n; 356 int err; 357 358 if (!HAS_EXECLISTS(i915)) 359 return -ENODEV; 360 361 if (get_user(idx, &ext->engine_index)) 362 return -EFAULT; 363 364 if (idx >= set->num_engines) { 365 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", 366 idx, set->num_engines); 367 return -EINVAL; 368 } 369 370 idx = array_index_nospec(idx, set->num_engines); 371 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) { 372 drm_dbg(&i915->drm, 373 "Invalid placement[%d], already occupied\n", idx); 374 return -EEXIST; 375 } 376 377 if (get_user(num_siblings, &ext->num_siblings)) 378 return -EFAULT; 379 380 err = check_user_mbz(&ext->flags); 381 if (err) 382 return err; 383 384 err = check_user_mbz(&ext->mbz64); 385 if (err) 386 return err; 387 388 if (num_siblings == 0) 389 return 0; 390 391 siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL); 392 if (!siblings) 393 return -ENOMEM; 394 395 for (n = 0; n < num_siblings; n++) { 396 struct i915_engine_class_instance ci; 397 398 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { 399 err = -EFAULT; 400 goto err_siblings; 401 } 402 403 siblings[n] = intel_engine_lookup_user(i915, 404 ci.engine_class, 405 ci.engine_instance); 406 if (!siblings[n]) { 407 drm_dbg(&i915->drm, 408 "Invalid sibling[%d]: { class:%d, inst:%d }\n", 409 n, ci.engine_class, ci.engine_instance); 410 err = -EINVAL; 411 goto err_siblings; 412 } 413 } 414 415 if (num_siblings == 1) { 416 set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL; 417 set->engines[idx].engine = siblings[0]; 418 kfree(siblings); 419 } else { 420 set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED; 421 set->engines[idx].num_siblings = num_siblings; 422 set->engines[idx].siblings = siblings; 423 } 424 425 return 0; 426 427 err_siblings: 428 kfree(siblings); 429 430 return err; 431 } 432 433 static int 434 set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data) 435 { 436 struct i915_context_engines_bond __user *ext = 437 container_of_user(base, typeof(*ext), base); 438 const struct set_proto_ctx_engines *set = data; 439 struct drm_i915_private *i915 = set->i915; 440 struct i915_engine_class_instance ci; 441 struct intel_engine_cs *master; 442 u16 idx, num_bonds; 443 int err, n; 444 445 if (get_user(idx, &ext->virtual_index)) 446 return -EFAULT; 447 448 if (idx >= set->num_engines) { 449 drm_dbg(&i915->drm, 450 "Invalid index for virtual engine: %d >= %d\n", 451 idx, set->num_engines); 452 return -EINVAL; 453 } 454 455 idx = array_index_nospec(idx, set->num_engines); 456 if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) { 457 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx); 458 return -EINVAL; 459 } 460 461 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) { 462 drm_dbg(&i915->drm, 463 "Bonding with virtual engines not allowed\n"); 464 return -EINVAL; 465 } 466 467 err = check_user_mbz(&ext->flags); 468 if (err) 469 return err; 470 471 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { 472 err = check_user_mbz(&ext->mbz64[n]); 473 if (err) 474 return err; 475 } 476 477 if (copy_from_user(&ci, &ext->master, 
sizeof(ci))) 478 return -EFAULT; 479 480 master = intel_engine_lookup_user(i915, 481 ci.engine_class, 482 ci.engine_instance); 483 if (!master) { 484 drm_dbg(&i915->drm, 485 "Unrecognised master engine: { class:%u, instance:%u }\n", 486 ci.engine_class, ci.engine_instance); 487 return -EINVAL; 488 } 489 490 if (intel_engine_uses_guc(master)) { 491 DRM_DEBUG("bonding extension not supported with GuC submission"); 492 return -ENODEV; 493 } 494 495 if (get_user(num_bonds, &ext->num_bonds)) 496 return -EFAULT; 497 498 for (n = 0; n < num_bonds; n++) { 499 struct intel_engine_cs *bond; 500 501 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) 502 return -EFAULT; 503 504 bond = intel_engine_lookup_user(i915, 505 ci.engine_class, 506 ci.engine_instance); 507 if (!bond) { 508 drm_dbg(&i915->drm, 509 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", 510 n, ci.engine_class, ci.engine_instance); 511 return -EINVAL; 512 } 513 } 514 515 return 0; 516 } 517 518 static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = { 519 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance, 520 [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond, 521 }; 522 523 static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv, 524 struct i915_gem_proto_context *pc, 525 const struct drm_i915_gem_context_param *args) 526 { 527 struct drm_i915_private *i915 = fpriv->dev_priv; 528 struct set_proto_ctx_engines set = { .i915 = i915 }; 529 struct i915_context_param_engines __user *user = 530 u64_to_user_ptr(args->value); 531 unsigned int n; 532 u64 extensions; 533 int err; 534 535 if (pc->num_user_engines >= 0) { 536 drm_dbg(&i915->drm, "Cannot set engines twice"); 537 return -EINVAL; 538 } 539 540 if (args->size < sizeof(*user) || 541 !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) { 542 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n", 543 args->size); 544 return -EINVAL; 545 } 546 547 set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); 548 /* RING_MASK has no shift so we can use it directly here */ 549 if (set.num_engines > I915_EXEC_RING_MASK + 1) 550 return -EINVAL; 551 552 set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL); 553 if (!set.engines) 554 return -ENOMEM; 555 556 for (n = 0; n < set.num_engines; n++) { 557 struct i915_engine_class_instance ci; 558 struct intel_engine_cs *engine; 559 560 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { 561 kfree(set.engines); 562 return -EFAULT; 563 } 564 565 memset(&set.engines[n], 0, sizeof(set.engines[n])); 566 567 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && 568 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) 569 continue; 570 571 engine = intel_engine_lookup_user(i915, 572 ci.engine_class, 573 ci.engine_instance); 574 if (!engine) { 575 drm_dbg(&i915->drm, 576 "Invalid engine[%d]: { class:%d, instance:%d }\n", 577 n, ci.engine_class, ci.engine_instance); 578 kfree(set.engines); 579 return -ENOENT; 580 } 581 582 set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL; 583 set.engines[n].engine = engine; 584 } 585 586 err = -EFAULT; 587 if (!get_user(extensions, &user->extensions)) 588 err = i915_user_extensions(u64_to_user_ptr(extensions), 589 set_proto_ctx_engines_extensions, 590 ARRAY_SIZE(set_proto_ctx_engines_extensions), 591 &set); 592 if (err) { 593 kfree(set.engines); 594 return err; 595 } 596 597 pc->num_user_engines = set.num_engines; 598 pc->user_engines = set.engines; 599 600 return 0; 601 } 
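
/*
 * For reference, a hedged userspace sketch of the engine map that
 * set_proto_ctx_engines() above parses (structure and ioctl names are the
 * uAPI ones from include/uapi/drm/i915_drm.h; "fd" and "ctx_id" are assumed
 * to exist and error handling is omitted). Slot 0 is bound to rcs0 and slot 1
 * to vcs0 purely as an example; execbuf then selects a slot by its index:
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *		.engines = {
 *			{ .engine_class = I915_ENGINE_CLASS_RENDER,
 *			  .engine_instance = 0 },
 *			{ .engine_class = I915_ENGINE_CLASS_VIDEO,
 *			  .engine_instance = 0 },
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (uintptr_t)&engines,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */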
602 603 static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv, 604 struct i915_gem_proto_context *pc, 605 struct drm_i915_gem_context_param *args) 606 { 607 struct drm_i915_private *i915 = fpriv->dev_priv; 608 struct drm_i915_gem_context_param_sseu user_sseu; 609 struct intel_sseu *sseu; 610 int ret; 611 612 if (args->size < sizeof(user_sseu)) 613 return -EINVAL; 614 615 if (GRAPHICS_VER(i915) != 11) 616 return -ENODEV; 617 618 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 619 sizeof(user_sseu))) 620 return -EFAULT; 621 622 if (user_sseu.rsvd) 623 return -EINVAL; 624 625 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 626 return -EINVAL; 627 628 if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0)) 629 return -EINVAL; 630 631 if (pc->num_user_engines >= 0) { 632 int idx = user_sseu.engine.engine_instance; 633 struct i915_gem_proto_engine *pe; 634 635 if (idx >= pc->num_user_engines) 636 return -EINVAL; 637 638 pe = &pc->user_engines[idx]; 639 640 /* Only render engine supports RPCS configuration. */ 641 if (pe->engine->class != RENDER_CLASS) 642 return -EINVAL; 643 644 sseu = &pe->sseu; 645 } else { 646 /* Only render engine supports RPCS configuration. */ 647 if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER) 648 return -EINVAL; 649 650 /* There is only one render engine */ 651 if (user_sseu.engine.engine_instance != 0) 652 return -EINVAL; 653 654 sseu = &pc->legacy_rcs_sseu; 655 } 656 657 ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu); 658 if (ret) 659 return ret; 660 661 args->size = sizeof(user_sseu); 662 663 return 0; 664 } 665 666 static int set_proto_ctx_param(struct drm_i915_file_private *fpriv, 667 struct i915_gem_proto_context *pc, 668 struct drm_i915_gem_context_param *args) 669 { 670 int ret = 0; 671 672 switch (args->param) { 673 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 674 if (args->size) 675 ret = -EINVAL; 676 else if (args->value) 677 pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE); 678 else 679 pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE); 680 break; 681 682 case I915_CONTEXT_PARAM_BANNABLE: 683 if (args->size) 684 ret = -EINVAL; 685 else if (!capable(CAP_SYS_ADMIN) && !args->value) 686 ret = -EPERM; 687 else if (args->value) 688 pc->user_flags |= BIT(UCONTEXT_BANNABLE); 689 else 690 pc->user_flags &= ~BIT(UCONTEXT_BANNABLE); 691 break; 692 693 case I915_CONTEXT_PARAM_RECOVERABLE: 694 if (args->size) 695 ret = -EINVAL; 696 else if (args->value) 697 pc->user_flags |= BIT(UCONTEXT_RECOVERABLE); 698 else 699 pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE); 700 break; 701 702 case I915_CONTEXT_PARAM_PRIORITY: 703 ret = validate_priority(fpriv->dev_priv, args); 704 if (!ret) 705 pc->sched.priority = args->value; 706 break; 707 708 case I915_CONTEXT_PARAM_SSEU: 709 ret = set_proto_ctx_sseu(fpriv, pc, args); 710 break; 711 712 case I915_CONTEXT_PARAM_VM: 713 ret = set_proto_ctx_vm(fpriv, pc, args); 714 break; 715 716 case I915_CONTEXT_PARAM_ENGINES: 717 ret = set_proto_ctx_engines(fpriv, pc, args); 718 break; 719 720 case I915_CONTEXT_PARAM_PERSISTENCE: 721 if (args->size) 722 ret = -EINVAL; 723 ret = proto_context_set_persistence(fpriv->dev_priv, pc, 724 args->value); 725 break; 726 727 case I915_CONTEXT_PARAM_NO_ZEROMAP: 728 case I915_CONTEXT_PARAM_BAN_PERIOD: 729 case I915_CONTEXT_PARAM_RINGSIZE: 730 default: 731 ret = -EINVAL; 732 break; 733 } 734 735 return ret; 736 } 737 738 static struct i915_address_space * 739 context_get_vm_rcu(struct i915_gem_context *ctx) 
740 { 741 GEM_BUG_ON(!rcu_access_pointer(ctx->vm)); 742 743 do { 744 struct i915_address_space *vm; 745 746 /* 747 * We do not allow downgrading from full-ppgtt [to a shared 748 * global gtt], so ctx->vm cannot become NULL. 749 */ 750 vm = rcu_dereference(ctx->vm); 751 if (!kref_get_unless_zero(&vm->ref)) 752 continue; 753 754 /* 755 * This ppgtt may have be reallocated between 756 * the read and the kref, and reassigned to a third 757 * context. In order to avoid inadvertent sharing 758 * of this ppgtt with that third context (and not 759 * src), we have to confirm that we have the same 760 * ppgtt after passing through the strong memory 761 * barrier implied by a successful 762 * kref_get_unless_zero(). 763 * 764 * Once we have acquired the current ppgtt of ctx, 765 * we no longer care if it is released from ctx, as 766 * it cannot be reallocated elsewhere. 767 */ 768 769 if (vm == rcu_access_pointer(ctx->vm)) 770 return rcu_pointer_handoff(vm); 771 772 i915_vm_put(vm); 773 } while (1); 774 } 775 776 static int intel_context_set_gem(struct intel_context *ce, 777 struct i915_gem_context *ctx, 778 struct intel_sseu sseu) 779 { 780 int ret = 0; 781 782 GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); 783 RCU_INIT_POINTER(ce->gem_context, ctx); 784 785 ce->ring_size = SZ_16K; 786 787 if (rcu_access_pointer(ctx->vm)) { 788 struct i915_address_space *vm; 789 790 rcu_read_lock(); 791 vm = context_get_vm_rcu(ctx); /* hmm */ 792 rcu_read_unlock(); 793 794 i915_vm_put(ce->vm); 795 ce->vm = vm; 796 } 797 798 if (ctx->sched.priority >= I915_PRIORITY_NORMAL && 799 intel_engine_has_timeslices(ce->engine) && 800 intel_engine_has_semaphores(ce->engine)) 801 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); 802 803 if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) && 804 ctx->i915->params.request_timeout_ms) { 805 unsigned int timeout_ms = ctx->i915->params.request_timeout_ms; 806 807 intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000); 808 } 809 810 /* A valid SSEU has no zero fields */ 811 if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS)) 812 ret = intel_context_reconfigure_sseu(ce, sseu); 813 814 return ret; 815 } 816 817 static void __free_engines(struct i915_gem_engines *e, unsigned int count) 818 { 819 while (count--) { 820 if (!e->engines[count]) 821 continue; 822 823 intel_context_put(e->engines[count]); 824 } 825 kfree(e); 826 } 827 828 static void free_engines(struct i915_gem_engines *e) 829 { 830 __free_engines(e, e->num_engines); 831 } 832 833 static void free_engines_rcu(struct rcu_head *rcu) 834 { 835 struct i915_gem_engines *engines = 836 container_of(rcu, struct i915_gem_engines, rcu); 837 838 i915_sw_fence_fini(&engines->fence); 839 free_engines(engines); 840 } 841 842 static int __i915_sw_fence_call 843 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) 844 { 845 struct i915_gem_engines *engines = 846 container_of(fence, typeof(*engines), fence); 847 848 switch (state) { 849 case FENCE_COMPLETE: 850 if (!list_empty(&engines->link)) { 851 struct i915_gem_context *ctx = engines->ctx; 852 unsigned long flags; 853 854 spin_lock_irqsave(&ctx->stale.lock, flags); 855 list_del(&engines->link); 856 spin_unlock_irqrestore(&ctx->stale.lock, flags); 857 } 858 i915_gem_context_put(engines->ctx); 859 break; 860 861 case FENCE_FREE: 862 init_rcu_head(&engines->rcu); 863 call_rcu(&engines->rcu, free_engines_rcu); 864 break; 865 } 866 867 return NOTIFY_DONE; 868 } 869 870 static struct i915_gem_engines *alloc_engines(unsigned int count) 871 { 872 struct 
i915_gem_engines *e;

	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
	if (!e)
		return NULL;

	i915_sw_fence_init(&e->fence, engines_notify);
	return e;
}

static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
						struct intel_sseu rcs_sseu)
{
	const struct intel_gt *gt = &ctx->i915->gt;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e, *err;
	enum intel_engine_id id;

	e = alloc_engines(I915_NUM_ENGINES);
	if (!e)
		return ERR_PTR(-ENOMEM);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;
		struct intel_sseu sseu = {};
		int ret;

		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[engine->legacy_idx] = ce;
		e->num_engines = max(e->num_engines, engine->legacy_idx + 1);

		if (engine->class == RENDER_CLASS)
			sseu = rcs_sseu;

		ret = intel_context_set_gem(ce, ctx, sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}
	}

	return e;

free_engines:
	free_engines(e);
	return err;
}

static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
					     unsigned int num_engines,
					     struct i915_gem_proto_engine *pe)
{
	struct i915_gem_engines *e, *err;
	unsigned int n;

	e = alloc_engines(num_engines);
	if (!e)
		return ERR_PTR(-ENOMEM);

	/*
	 * Set num_engines before populating the array so that, on an error
	 * below, free_engines() releases any contexts already created
	 * (unused slots are left NULL and skipped).
	 */
	e->num_engines = num_engines;

	for (n = 0; n < num_engines; n++) {
		struct intel_context *ce;
		int ret;

		switch (pe[n].type) {
		case I915_GEM_ENGINE_TYPE_PHYSICAL:
			ce = intel_context_create(pe[n].engine);
			break;

		case I915_GEM_ENGINE_TYPE_BALANCED:
			ce = intel_engine_create_virtual(pe[n].siblings,
							 pe[n].num_siblings);
			break;

		case I915_GEM_ENGINE_TYPE_INVALID:
		default:
			GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
			continue;
		}

		if (IS_ERR(ce)) {
			err = ERR_CAST(ce);
			goto free_engines;
		}

		e->engines[n] = ce;

		/* Apply this engine's own SSEU configuration, not slot 0's. */
		ret = intel_context_set_gem(ce, ctx, pe[n].sseu);
		if (ret) {
			err = ERR_PTR(ret);
			goto free_engines;
		}
	}

	return e;

free_engines:
	free_engines(e);
	return err;
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);

	kfree_rcu(ctx, rcu);
}

static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines, true);
}

static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}

static bool __cancel_engine(struct intel_engine_cs *engine)
{
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset). As we have marked our context
	 * as banned, any incomplete request, including any running, will
	 * be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fall back to doing a local reset
	 * instead.
	 */
	return intel_engine_pulse(engine) == 0;
}

static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	if (intel_context_has_inflight(ce))
		return intel_context_inflight(ce);

	if (!ce->timeline)
		return NULL;

	/*
	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
	 * to the request to prevent it being transferred to a new timeline
	 * (and onto a new timeline->requests list).
	 */
	rcu_read_lock();
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		bool found;

		/* timeline is already completed up to this point? */
		if (!i915_request_get_rcu(rq))
			break;

		/* Check with the backend if the request is inflight */
		found = true;
		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
			found = i915_request_active_engine(rq, &engine);

		i915_request_put(rq);
		if (found)
			break;
	}
	rcu_read_unlock();

	return engine;
}

static void kill_engines(struct i915_gem_engines *engines, bool ban)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
	for_each_gem_engine(ce, engines, it) {
		struct intel_engine_cs *engine;

		if (ban && intel_context_ban(ce, NULL))
			continue;

		/*
		 * Check the current active state of this context; if we
		 * are currently executing on the GPU we need to evict
		 * ourselves. On the other hand, if we haven't yet been
		 * submitted to the GPU or if everything is complete,
		 * we have nothing to do.
		 */
		engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
		if (engine && !__cancel_engine(engine) && ban)
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a full
			 * reset. We hope the collateral damage is worth it.
1101 */ 1102 __reset_context(engines->ctx, engine); 1103 } 1104 } 1105 1106 static void kill_context(struct i915_gem_context *ctx) 1107 { 1108 bool ban = (!i915_gem_context_is_persistent(ctx) || 1109 !ctx->i915->params.enable_hangcheck); 1110 struct i915_gem_engines *pos, *next; 1111 1112 spin_lock_irq(&ctx->stale.lock); 1113 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 1114 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) { 1115 if (!i915_sw_fence_await(&pos->fence)) { 1116 list_del_init(&pos->link); 1117 continue; 1118 } 1119 1120 spin_unlock_irq(&ctx->stale.lock); 1121 1122 kill_engines(pos, ban); 1123 1124 spin_lock_irq(&ctx->stale.lock); 1125 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); 1126 list_safe_reset_next(pos, next, link); 1127 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */ 1128 1129 i915_sw_fence_complete(&pos->fence); 1130 } 1131 spin_unlock_irq(&ctx->stale.lock); 1132 } 1133 1134 static void engines_idle_release(struct i915_gem_context *ctx, 1135 struct i915_gem_engines *engines) 1136 { 1137 struct i915_gem_engines_iter it; 1138 struct intel_context *ce; 1139 1140 INIT_LIST_HEAD(&engines->link); 1141 1142 engines->ctx = i915_gem_context_get(ctx); 1143 1144 for_each_gem_engine(ce, engines, it) { 1145 int err; 1146 1147 /* serialises with execbuf */ 1148 set_bit(CONTEXT_CLOSED_BIT, &ce->flags); 1149 if (!intel_context_pin_if_active(ce)) 1150 continue; 1151 1152 /* Wait until context is finally scheduled out and retired */ 1153 err = i915_sw_fence_await_active(&engines->fence, 1154 &ce->active, 1155 I915_ACTIVE_AWAIT_BARRIER); 1156 intel_context_unpin(ce); 1157 if (err) 1158 goto kill; 1159 } 1160 1161 spin_lock_irq(&ctx->stale.lock); 1162 if (!i915_gem_context_is_closed(ctx)) 1163 list_add_tail(&engines->link, &ctx->stale.engines); 1164 spin_unlock_irq(&ctx->stale.lock); 1165 1166 kill: 1167 if (list_empty(&engines->link)) /* raced, already closed */ 1168 kill_engines(engines, true); 1169 1170 i915_sw_fence_commit(&engines->fence); 1171 } 1172 1173 static void set_closed_name(struct i915_gem_context *ctx) 1174 { 1175 char *s; 1176 1177 /* Replace '[]' with '<>' to indicate closed in debug prints */ 1178 1179 s = strrchr(ctx->name, '['); 1180 if (!s) 1181 return; 1182 1183 *s = '<'; 1184 1185 s = strchr(s + 1, ']'); 1186 if (s) 1187 *s = '>'; 1188 } 1189 1190 static void context_close(struct i915_gem_context *ctx) 1191 { 1192 struct i915_address_space *vm; 1193 1194 /* Flush any concurrent set_engines() */ 1195 mutex_lock(&ctx->engines_mutex); 1196 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); 1197 i915_gem_context_set_closed(ctx); 1198 mutex_unlock(&ctx->engines_mutex); 1199 1200 mutex_lock(&ctx->mutex); 1201 1202 set_closed_name(ctx); 1203 1204 vm = i915_gem_context_vm(ctx); 1205 if (vm) 1206 i915_vm_close(vm); 1207 1208 if (ctx->syncobj) 1209 drm_syncobj_put(ctx->syncobj); 1210 1211 ctx->file_priv = ERR_PTR(-EBADF); 1212 1213 /* 1214 * The LUT uses the VMA as a backpointer to unref the object, 1215 * so we need to clear the LUT before we close all the VMA (inside 1216 * the ppgtt). 1217 */ 1218 lut_close(ctx); 1219 1220 spin_lock(&ctx->i915->gem.contexts.lock); 1221 list_del(&ctx->link); 1222 spin_unlock(&ctx->i915->gem.contexts.lock); 1223 1224 mutex_unlock(&ctx->mutex); 1225 1226 /* 1227 * If the user has disabled hangchecking, we can not be sure that 1228 * the batches will ever complete after the context is closed, 1229 * keeping the context and all resources pinned forever. 
So in this 1230 * case we opt to forcibly kill off all remaining requests on 1231 * context close. 1232 */ 1233 kill_context(ctx); 1234 1235 i915_gem_context_put(ctx); 1236 } 1237 1238 static int __context_set_persistence(struct i915_gem_context *ctx, bool state) 1239 { 1240 if (i915_gem_context_is_persistent(ctx) == state) 1241 return 0; 1242 1243 if (state) { 1244 /* 1245 * Only contexts that are short-lived [that will expire or be 1246 * reset] are allowed to survive past termination. We require 1247 * hangcheck to ensure that the persistent requests are healthy. 1248 */ 1249 if (!ctx->i915->params.enable_hangcheck) 1250 return -EINVAL; 1251 1252 i915_gem_context_set_persistence(ctx); 1253 } else { 1254 /* To cancel a context we use "preempt-to-idle" */ 1255 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) 1256 return -ENODEV; 1257 1258 /* 1259 * If the cancel fails, we then need to reset, cleanly! 1260 * 1261 * If the per-engine reset fails, all hope is lost! We resort 1262 * to a full GPU reset in that unlikely case, but realistically 1263 * if the engine could not reset, the full reset does not fare 1264 * much better. The damage has been done. 1265 * 1266 * However, if we cannot reset an engine by itself, we cannot 1267 * cleanup a hanging persistent context without causing 1268 * colateral damage, and we should not pretend we can by 1269 * exposing the interface. 1270 */ 1271 if (!intel_has_reset_engine(&ctx->i915->gt)) 1272 return -ENODEV; 1273 1274 i915_gem_context_clear_persistence(ctx); 1275 } 1276 1277 return 0; 1278 } 1279 1280 static inline struct i915_gem_engines * 1281 __context_engines_await(const struct i915_gem_context *ctx, 1282 bool *user_engines) 1283 { 1284 struct i915_gem_engines *engines; 1285 1286 rcu_read_lock(); 1287 do { 1288 engines = rcu_dereference(ctx->engines); 1289 GEM_BUG_ON(!engines); 1290 1291 if (user_engines) 1292 *user_engines = i915_gem_context_user_engines(ctx); 1293 1294 /* successful await => strong mb */ 1295 if (unlikely(!i915_sw_fence_await(&engines->fence))) 1296 continue; 1297 1298 if (likely(engines == rcu_access_pointer(ctx->engines))) 1299 break; 1300 1301 i915_sw_fence_complete(&engines->fence); 1302 } while (1); 1303 rcu_read_unlock(); 1304 1305 return engines; 1306 } 1307 1308 static void 1309 context_apply_all(struct i915_gem_context *ctx, 1310 void (*fn)(struct intel_context *ce, void *data), 1311 void *data) 1312 { 1313 struct i915_gem_engines_iter it; 1314 struct i915_gem_engines *e; 1315 struct intel_context *ce; 1316 1317 e = __context_engines_await(ctx, NULL); 1318 for_each_gem_engine(ce, e, it) 1319 fn(ce, data); 1320 i915_sw_fence_complete(&e->fence); 1321 } 1322 1323 static struct i915_gem_context * 1324 i915_gem_create_context(struct drm_i915_private *i915, 1325 const struct i915_gem_proto_context *pc) 1326 { 1327 struct i915_gem_context *ctx; 1328 struct i915_address_space *vm = NULL; 1329 struct i915_gem_engines *e; 1330 int err; 1331 int i; 1332 1333 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 1334 if (!ctx) 1335 return ERR_PTR(-ENOMEM); 1336 1337 kref_init(&ctx->ref); 1338 ctx->i915 = i915; 1339 ctx->sched = pc->sched; 1340 mutex_init(&ctx->mutex); 1341 INIT_LIST_HEAD(&ctx->link); 1342 1343 spin_lock_init(&ctx->stale.lock); 1344 INIT_LIST_HEAD(&ctx->stale.engines); 1345 1346 if (pc->vm) { 1347 vm = i915_vm_get(pc->vm); 1348 } else if (HAS_FULL_PPGTT(i915)) { 1349 struct i915_ppgtt *ppgtt; 1350 1351 ppgtt = i915_ppgtt_create(&i915->gt); 1352 if (IS_ERR(ppgtt)) { 1353 drm_dbg(&i915->drm, "PPGTT setup failed 
(%ld)\n", 1354 PTR_ERR(ppgtt)); 1355 err = PTR_ERR(ppgtt); 1356 goto err_ctx; 1357 } 1358 vm = &ppgtt->vm; 1359 } 1360 if (vm) { 1361 RCU_INIT_POINTER(ctx->vm, i915_vm_open(vm)); 1362 1363 /* i915_vm_open() takes a reference */ 1364 i915_vm_put(vm); 1365 } 1366 1367 mutex_init(&ctx->engines_mutex); 1368 if (pc->num_user_engines >= 0) { 1369 i915_gem_context_set_user_engines(ctx); 1370 e = user_engines(ctx, pc->num_user_engines, pc->user_engines); 1371 } else { 1372 i915_gem_context_clear_user_engines(ctx); 1373 e = default_engines(ctx, pc->legacy_rcs_sseu); 1374 } 1375 if (IS_ERR(e)) { 1376 err = PTR_ERR(e); 1377 goto err_vm; 1378 } 1379 RCU_INIT_POINTER(ctx->engines, e); 1380 1381 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); 1382 mutex_init(&ctx->lut_mutex); 1383 1384 /* NB: Mark all slices as needing a remap so that when the context first 1385 * loads it will restore whatever remap state already exists. If there 1386 * is no remap info, it will be a NOP. */ 1387 ctx->remap_slice = ALL_L3_SLICES(i915); 1388 1389 ctx->user_flags = pc->user_flags; 1390 1391 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) 1392 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; 1393 1394 if (pc->single_timeline) { 1395 err = drm_syncobj_create(&ctx->syncobj, 1396 DRM_SYNCOBJ_CREATE_SIGNALED, 1397 NULL); 1398 if (err) 1399 goto err_engines; 1400 } 1401 1402 trace_i915_context_create(ctx); 1403 1404 return ctx; 1405 1406 err_engines: 1407 free_engines(e); 1408 err_vm: 1409 if (ctx->vm) 1410 i915_vm_close(ctx->vm); 1411 err_ctx: 1412 kfree(ctx); 1413 return ERR_PTR(err); 1414 } 1415 1416 static void init_contexts(struct i915_gem_contexts *gc) 1417 { 1418 spin_lock_init(&gc->lock); 1419 INIT_LIST_HEAD(&gc->list); 1420 } 1421 1422 void i915_gem_init__contexts(struct drm_i915_private *i915) 1423 { 1424 init_contexts(&i915->gem.contexts); 1425 } 1426 1427 static void gem_context_register(struct i915_gem_context *ctx, 1428 struct drm_i915_file_private *fpriv, 1429 u32 id) 1430 { 1431 struct drm_i915_private *i915 = ctx->i915; 1432 void *old; 1433 1434 ctx->file_priv = fpriv; 1435 1436 ctx->pid = get_task_pid(current, PIDTYPE_PID); 1437 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 1438 current->comm, pid_nr(ctx->pid)); 1439 1440 /* And finally expose ourselves to userspace via the idr */ 1441 old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL); 1442 WARN_ON(old); 1443 1444 spin_lock(&i915->gem.contexts.lock); 1445 list_add_tail(&ctx->link, &i915->gem.contexts.list); 1446 spin_unlock(&i915->gem.contexts.lock); 1447 } 1448 1449 int i915_gem_context_open(struct drm_i915_private *i915, 1450 struct drm_file *file) 1451 { 1452 struct drm_i915_file_private *file_priv = file->driver_priv; 1453 struct i915_gem_proto_context *pc; 1454 struct i915_gem_context *ctx; 1455 int err; 1456 1457 mutex_init(&file_priv->proto_context_lock); 1458 xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC); 1459 1460 /* 0 reserved for the default context */ 1461 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1); 1462 1463 /* 0 reserved for invalid/unassigned ppgtt */ 1464 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); 1465 1466 pc = proto_context_create(i915, 0); 1467 if (IS_ERR(pc)) { 1468 err = PTR_ERR(pc); 1469 goto err; 1470 } 1471 1472 ctx = i915_gem_create_context(i915, pc); 1473 proto_context_close(pc); 1474 if (IS_ERR(ctx)) { 1475 err = PTR_ERR(ctx); 1476 goto err; 1477 } 1478 1479 gem_context_register(ctx, file_priv, 0); 1480 1481 return 0; 1482 1483 err: 1484 xa_destroy(&file_priv->vm_xa); 
1485 xa_destroy(&file_priv->context_xa); 1486 xa_destroy(&file_priv->proto_context_xa); 1487 mutex_destroy(&file_priv->proto_context_lock); 1488 return err; 1489 } 1490 1491 void i915_gem_context_close(struct drm_file *file) 1492 { 1493 struct drm_i915_file_private *file_priv = file->driver_priv; 1494 struct i915_gem_proto_context *pc; 1495 struct i915_address_space *vm; 1496 struct i915_gem_context *ctx; 1497 unsigned long idx; 1498 1499 xa_for_each(&file_priv->proto_context_xa, idx, pc) 1500 proto_context_close(pc); 1501 xa_destroy(&file_priv->proto_context_xa); 1502 mutex_destroy(&file_priv->proto_context_lock); 1503 1504 xa_for_each(&file_priv->context_xa, idx, ctx) 1505 context_close(ctx); 1506 xa_destroy(&file_priv->context_xa); 1507 1508 xa_for_each(&file_priv->vm_xa, idx, vm) 1509 i915_vm_put(vm); 1510 xa_destroy(&file_priv->vm_xa); 1511 } 1512 1513 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, 1514 struct drm_file *file) 1515 { 1516 struct drm_i915_private *i915 = to_i915(dev); 1517 struct drm_i915_gem_vm_control *args = data; 1518 struct drm_i915_file_private *file_priv = file->driver_priv; 1519 struct i915_ppgtt *ppgtt; 1520 u32 id; 1521 int err; 1522 1523 if (!HAS_FULL_PPGTT(i915)) 1524 return -ENODEV; 1525 1526 if (args->flags) 1527 return -EINVAL; 1528 1529 ppgtt = i915_ppgtt_create(&i915->gt); 1530 if (IS_ERR(ppgtt)) 1531 return PTR_ERR(ppgtt); 1532 1533 if (args->extensions) { 1534 err = i915_user_extensions(u64_to_user_ptr(args->extensions), 1535 NULL, 0, 1536 ppgtt); 1537 if (err) 1538 goto err_put; 1539 } 1540 1541 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, 1542 xa_limit_32b, GFP_KERNEL); 1543 if (err) 1544 goto err_put; 1545 1546 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 1547 args->vm_id = id; 1548 return 0; 1549 1550 err_put: 1551 i915_vm_put(&ppgtt->vm); 1552 return err; 1553 } 1554 1555 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, 1556 struct drm_file *file) 1557 { 1558 struct drm_i915_file_private *file_priv = file->driver_priv; 1559 struct drm_i915_gem_vm_control *args = data; 1560 struct i915_address_space *vm; 1561 1562 if (args->flags) 1563 return -EINVAL; 1564 1565 if (args->extensions) 1566 return -EINVAL; 1567 1568 vm = xa_erase(&file_priv->vm_xa, args->vm_id); 1569 if (!vm) 1570 return -ENOENT; 1571 1572 i915_vm_put(vm); 1573 return 0; 1574 } 1575 1576 static int get_ppgtt(struct drm_i915_file_private *file_priv, 1577 struct i915_gem_context *ctx, 1578 struct drm_i915_gem_context_param *args) 1579 { 1580 struct i915_address_space *vm; 1581 int err; 1582 u32 id; 1583 1584 if (!rcu_access_pointer(ctx->vm)) 1585 return -ENODEV; 1586 1587 rcu_read_lock(); 1588 vm = context_get_vm_rcu(ctx); 1589 rcu_read_unlock(); 1590 if (!vm) 1591 return -ENODEV; 1592 1593 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); 1594 if (err) 1595 goto err_put; 1596 1597 i915_vm_open(vm); 1598 1599 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 1600 args->value = id; 1601 args->size = 0; 1602 1603 err_put: 1604 i915_vm_put(vm); 1605 return err; 1606 } 1607 1608 int 1609 i915_gem_user_to_context_sseu(struct intel_gt *gt, 1610 const struct drm_i915_gem_context_param_sseu *user, 1611 struct intel_sseu *context) 1612 { 1613 const struct sseu_dev_info *device = >->info.sseu; 1614 struct drm_i915_private *i915 = gt->i915; 1615 1616 /* No zeros in any field. 
*/ 1617 if (!user->slice_mask || !user->subslice_mask || 1618 !user->min_eus_per_subslice || !user->max_eus_per_subslice) 1619 return -EINVAL; 1620 1621 /* Max > min. */ 1622 if (user->max_eus_per_subslice < user->min_eus_per_subslice) 1623 return -EINVAL; 1624 1625 /* 1626 * Some future proofing on the types since the uAPI is wider than the 1627 * current internal implementation. 1628 */ 1629 if (overflows_type(user->slice_mask, context->slice_mask) || 1630 overflows_type(user->subslice_mask, context->subslice_mask) || 1631 overflows_type(user->min_eus_per_subslice, 1632 context->min_eus_per_subslice) || 1633 overflows_type(user->max_eus_per_subslice, 1634 context->max_eus_per_subslice)) 1635 return -EINVAL; 1636 1637 /* Check validity against hardware. */ 1638 if (user->slice_mask & ~device->slice_mask) 1639 return -EINVAL; 1640 1641 if (user->subslice_mask & ~device->subslice_mask[0]) 1642 return -EINVAL; 1643 1644 if (user->max_eus_per_subslice > device->max_eus_per_subslice) 1645 return -EINVAL; 1646 1647 context->slice_mask = user->slice_mask; 1648 context->subslice_mask = user->subslice_mask; 1649 context->min_eus_per_subslice = user->min_eus_per_subslice; 1650 context->max_eus_per_subslice = user->max_eus_per_subslice; 1651 1652 /* Part specific restrictions. */ 1653 if (GRAPHICS_VER(i915) == 11) { 1654 unsigned int hw_s = hweight8(device->slice_mask); 1655 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); 1656 unsigned int req_s = hweight8(context->slice_mask); 1657 unsigned int req_ss = hweight8(context->subslice_mask); 1658 1659 /* 1660 * Only full subslice enablement is possible if more than one 1661 * slice is turned on. 1662 */ 1663 if (req_s > 1 && req_ss != hw_ss_per_s) 1664 return -EINVAL; 1665 1666 /* 1667 * If more than four (SScount bitfield limit) subslices are 1668 * requested then the number has to be even. 1669 */ 1670 if (req_ss > 4 && (req_ss & 1)) 1671 return -EINVAL; 1672 1673 /* 1674 * If only one slice is enabled and subslice count is below the 1675 * device full enablement, it must be at most half of the all 1676 * available subslices. 1677 */ 1678 if (req_s == 1 && req_ss < hw_ss_per_s && 1679 req_ss > (hw_ss_per_s / 2)) 1680 return -EINVAL; 1681 1682 /* ABI restriction - VME use case only. */ 1683 1684 /* All slices or one slice only. */ 1685 if (req_s != 1 && req_s != hw_s) 1686 return -EINVAL; 1687 1688 /* 1689 * Half subslices or full enablement only when one slice is 1690 * enabled. 1691 */ 1692 if (req_s == 1 && 1693 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) 1694 return -EINVAL; 1695 1696 /* No EU configuration changes. 
*/ 1697 if ((user->min_eus_per_subslice != 1698 device->max_eus_per_subslice) || 1699 (user->max_eus_per_subslice != 1700 device->max_eus_per_subslice)) 1701 return -EINVAL; 1702 } 1703 1704 return 0; 1705 } 1706 1707 static int set_sseu(struct i915_gem_context *ctx, 1708 struct drm_i915_gem_context_param *args) 1709 { 1710 struct drm_i915_private *i915 = ctx->i915; 1711 struct drm_i915_gem_context_param_sseu user_sseu; 1712 struct intel_context *ce; 1713 struct intel_sseu sseu; 1714 unsigned long lookup; 1715 int ret; 1716 1717 if (args->size < sizeof(user_sseu)) 1718 return -EINVAL; 1719 1720 if (GRAPHICS_VER(i915) != 11) 1721 return -ENODEV; 1722 1723 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 1724 sizeof(user_sseu))) 1725 return -EFAULT; 1726 1727 if (user_sseu.rsvd) 1728 return -EINVAL; 1729 1730 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 1731 return -EINVAL; 1732 1733 lookup = 0; 1734 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 1735 lookup |= LOOKUP_USER_INDEX; 1736 1737 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 1738 if (IS_ERR(ce)) 1739 return PTR_ERR(ce); 1740 1741 /* Only render engine supports RPCS configuration. */ 1742 if (ce->engine->class != RENDER_CLASS) { 1743 ret = -ENODEV; 1744 goto out_ce; 1745 } 1746 1747 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu); 1748 if (ret) 1749 goto out_ce; 1750 1751 ret = intel_context_reconfigure_sseu(ce, sseu); 1752 if (ret) 1753 goto out_ce; 1754 1755 args->size = sizeof(user_sseu); 1756 1757 out_ce: 1758 intel_context_put(ce); 1759 return ret; 1760 } 1761 1762 static int 1763 set_persistence(struct i915_gem_context *ctx, 1764 const struct drm_i915_gem_context_param *args) 1765 { 1766 if (args->size) 1767 return -EINVAL; 1768 1769 return __context_set_persistence(ctx, args->value); 1770 } 1771 1772 static void __apply_priority(struct intel_context *ce, void *arg) 1773 { 1774 struct i915_gem_context *ctx = arg; 1775 1776 if (!intel_engine_has_timeslices(ce->engine)) 1777 return; 1778 1779 if (ctx->sched.priority >= I915_PRIORITY_NORMAL && 1780 intel_engine_has_semaphores(ce->engine)) 1781 intel_context_set_use_semaphores(ce); 1782 else 1783 intel_context_clear_use_semaphores(ce); 1784 } 1785 1786 static int set_priority(struct i915_gem_context *ctx, 1787 const struct drm_i915_gem_context_param *args) 1788 { 1789 int err; 1790 1791 err = validate_priority(ctx->i915, args); 1792 if (err) 1793 return err; 1794 1795 ctx->sched.priority = args->value; 1796 context_apply_all(ctx, __apply_priority, ctx); 1797 1798 return 0; 1799 } 1800 1801 static int ctx_setparam(struct drm_i915_file_private *fpriv, 1802 struct i915_gem_context *ctx, 1803 struct drm_i915_gem_context_param *args) 1804 { 1805 int ret = 0; 1806 1807 switch (args->param) { 1808 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 1809 if (args->size) 1810 ret = -EINVAL; 1811 else if (args->value) 1812 i915_gem_context_set_no_error_capture(ctx); 1813 else 1814 i915_gem_context_clear_no_error_capture(ctx); 1815 break; 1816 1817 case I915_CONTEXT_PARAM_BANNABLE: 1818 if (args->size) 1819 ret = -EINVAL; 1820 else if (!capable(CAP_SYS_ADMIN) && !args->value) 1821 ret = -EPERM; 1822 else if (args->value) 1823 i915_gem_context_set_bannable(ctx); 1824 else 1825 i915_gem_context_clear_bannable(ctx); 1826 break; 1827 1828 case I915_CONTEXT_PARAM_RECOVERABLE: 1829 if (args->size) 1830 ret = -EINVAL; 1831 else if (args->value) 1832 i915_gem_context_set_recoverable(ctx); 1833 else 1834 
i915_gem_context_clear_recoverable(ctx); 1835 break; 1836 1837 case I915_CONTEXT_PARAM_PRIORITY: 1838 ret = set_priority(ctx, args); 1839 break; 1840 1841 case I915_CONTEXT_PARAM_SSEU: 1842 ret = set_sseu(ctx, args); 1843 break; 1844 1845 case I915_CONTEXT_PARAM_PERSISTENCE: 1846 ret = set_persistence(ctx, args); 1847 break; 1848 1849 case I915_CONTEXT_PARAM_NO_ZEROMAP: 1850 case I915_CONTEXT_PARAM_BAN_PERIOD: 1851 case I915_CONTEXT_PARAM_RINGSIZE: 1852 case I915_CONTEXT_PARAM_VM: 1853 case I915_CONTEXT_PARAM_ENGINES: 1854 default: 1855 ret = -EINVAL; 1856 break; 1857 } 1858 1859 return ret; 1860 } 1861 1862 struct create_ext { 1863 struct i915_gem_proto_context *pc; 1864 struct drm_i915_file_private *fpriv; 1865 }; 1866 1867 static int create_setparam(struct i915_user_extension __user *ext, void *data) 1868 { 1869 struct drm_i915_gem_context_create_ext_setparam local; 1870 const struct create_ext *arg = data; 1871 1872 if (copy_from_user(&local, ext, sizeof(local))) 1873 return -EFAULT; 1874 1875 if (local.param.ctx_id) 1876 return -EINVAL; 1877 1878 return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param); 1879 } 1880 1881 static int invalid_ext(struct i915_user_extension __user *ext, void *data) 1882 { 1883 return -EINVAL; 1884 } 1885 1886 static const i915_user_extension_fn create_extensions[] = { 1887 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, 1888 [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext, 1889 }; 1890 1891 static bool client_is_banned(struct drm_i915_file_private *file_priv) 1892 { 1893 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; 1894 } 1895 1896 static inline struct i915_gem_context * 1897 __context_lookup(struct drm_i915_file_private *file_priv, u32 id) 1898 { 1899 struct i915_gem_context *ctx; 1900 1901 rcu_read_lock(); 1902 ctx = xa_load(&file_priv->context_xa, id); 1903 if (ctx && !kref_get_unless_zero(&ctx->ref)) 1904 ctx = NULL; 1905 rcu_read_unlock(); 1906 1907 return ctx; 1908 } 1909 1910 static struct i915_gem_context * 1911 finalize_create_context_locked(struct drm_i915_file_private *file_priv, 1912 struct i915_gem_proto_context *pc, u32 id) 1913 { 1914 struct i915_gem_context *ctx; 1915 void *old; 1916 1917 lockdep_assert_held(&file_priv->proto_context_lock); 1918 1919 ctx = i915_gem_create_context(file_priv->dev_priv, pc); 1920 if (IS_ERR(ctx)) 1921 return ctx; 1922 1923 gem_context_register(ctx, file_priv, id); 1924 1925 old = xa_erase(&file_priv->proto_context_xa, id); 1926 GEM_BUG_ON(old != pc); 1927 proto_context_close(pc); 1928 1929 /* One for the xarray and one for the caller */ 1930 return i915_gem_context_get(ctx); 1931 } 1932 1933 struct i915_gem_context * 1934 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) 1935 { 1936 struct i915_gem_proto_context *pc; 1937 struct i915_gem_context *ctx; 1938 1939 ctx = __context_lookup(file_priv, id); 1940 if (ctx) 1941 return ctx; 1942 1943 mutex_lock(&file_priv->proto_context_lock); 1944 /* Try one more time under the lock */ 1945 ctx = __context_lookup(file_priv, id); 1946 if (!ctx) { 1947 pc = xa_load(&file_priv->proto_context_xa, id); 1948 if (!pc) 1949 ctx = ERR_PTR(-ENOENT); 1950 else 1951 ctx = finalize_create_context_locked(file_priv, pc, id); 1952 } 1953 mutex_unlock(&file_priv->proto_context_lock); 1954 1955 return ctx; 1956 } 1957 1958 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, 1959 struct drm_file *file) 1960 { 1961 struct drm_i915_private *i915 = to_i915(dev); 1962 struct drm_i915_gem_context_create_ext *args = 
data; 1963 struct create_ext ext_data; 1964 int ret; 1965 u32 id; 1966 1967 if (!DRIVER_CAPS(i915)->has_logical_contexts) 1968 return -ENODEV; 1969 1970 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) 1971 return -EINVAL; 1972 1973 ret = intel_gt_terminally_wedged(&i915->gt); 1974 if (ret) 1975 return ret; 1976 1977 ext_data.fpriv = file->driver_priv; 1978 if (client_is_banned(ext_data.fpriv)) { 1979 drm_dbg(&i915->drm, 1980 "client %s[%d] banned from creating ctx\n", 1981 current->comm, task_pid_nr(current)); 1982 return -EIO; 1983 } 1984 1985 ext_data.pc = proto_context_create(i915, args->flags); 1986 if (IS_ERR(ext_data.pc)) 1987 return PTR_ERR(ext_data.pc); 1988 1989 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { 1990 ret = i915_user_extensions(u64_to_user_ptr(args->extensions), 1991 create_extensions, 1992 ARRAY_SIZE(create_extensions), 1993 &ext_data); 1994 if (ret) 1995 goto err_pc; 1996 } 1997 1998 if (GRAPHICS_VER(i915) > 12) { 1999 struct i915_gem_context *ctx; 2000 2001 /* Get ourselves a context ID */ 2002 ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL, 2003 xa_limit_32b, GFP_KERNEL); 2004 if (ret) 2005 goto err_pc; 2006 2007 ctx = i915_gem_create_context(i915, ext_data.pc); 2008 if (IS_ERR(ctx)) { 2009 ret = PTR_ERR(ctx); 2010 goto err_pc; 2011 } 2012 2013 proto_context_close(ext_data.pc); 2014 gem_context_register(ctx, ext_data.fpriv, id); 2015 } else { 2016 ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id); 2017 if (ret < 0) 2018 goto err_pc; 2019 } 2020 2021 args->ctx_id = id; 2022 drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id); 2023 2024 return 0; 2025 2026 err_pc: 2027 proto_context_close(ext_data.pc); 2028 return ret; 2029 } 2030 2031 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, 2032 struct drm_file *file) 2033 { 2034 struct drm_i915_gem_context_destroy *args = data; 2035 struct drm_i915_file_private *file_priv = file->driver_priv; 2036 struct i915_gem_proto_context *pc; 2037 struct i915_gem_context *ctx; 2038 2039 if (args->pad != 0) 2040 return -EINVAL; 2041 2042 if (!args->ctx_id) 2043 return -ENOENT; 2044 2045 /* We need to hold the proto-context lock here to prevent races 2046 * with finalize_create_context_locked(). 
2047 */ 2048 mutex_lock(&file_priv->proto_context_lock); 2049 ctx = xa_erase(&file_priv->context_xa, args->ctx_id); 2050 pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id); 2051 mutex_unlock(&file_priv->proto_context_lock); 2052 2053 if (!ctx && !pc) 2054 return -ENOENT; 2055 GEM_WARN_ON(ctx && pc); 2056 2057 if (pc) 2058 proto_context_close(pc); 2059 2060 if (ctx) 2061 context_close(ctx); 2062 2063 return 0; 2064 } 2065 2066 static int get_sseu(struct i915_gem_context *ctx, 2067 struct drm_i915_gem_context_param *args) 2068 { 2069 struct drm_i915_gem_context_param_sseu user_sseu; 2070 struct intel_context *ce; 2071 unsigned long lookup; 2072 int err; 2073 2074 if (args->size == 0) 2075 goto out; 2076 else if (args->size < sizeof(user_sseu)) 2077 return -EINVAL; 2078 2079 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 2080 sizeof(user_sseu))) 2081 return -EFAULT; 2082 2083 if (user_sseu.rsvd) 2084 return -EINVAL; 2085 2086 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 2087 return -EINVAL; 2088 2089 lookup = 0; 2090 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 2091 lookup |= LOOKUP_USER_INDEX; 2092 2093 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 2094 if (IS_ERR(ce)) 2095 return PTR_ERR(ce); 2096 2097 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */ 2098 if (err) { 2099 intel_context_put(ce); 2100 return err; 2101 } 2102 2103 user_sseu.slice_mask = ce->sseu.slice_mask; 2104 user_sseu.subslice_mask = ce->sseu.subslice_mask; 2105 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; 2106 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; 2107 2108 intel_context_unlock_pinned(ce); 2109 intel_context_put(ce); 2110 2111 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, 2112 sizeof(user_sseu))) 2113 return -EFAULT; 2114 2115 out: 2116 args->size = sizeof(user_sseu); 2117 2118 return 0; 2119 } 2120 2121 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, 2122 struct drm_file *file) 2123 { 2124 struct drm_i915_file_private *file_priv = file->driver_priv; 2125 struct drm_i915_gem_context_param *args = data; 2126 struct i915_gem_context *ctx; 2127 int ret = 0; 2128 2129 ctx = i915_gem_context_lookup(file_priv, args->ctx_id); 2130 if (IS_ERR(ctx)) 2131 return PTR_ERR(ctx); 2132 2133 switch (args->param) { 2134 case I915_CONTEXT_PARAM_GTT_SIZE: 2135 args->size = 0; 2136 rcu_read_lock(); 2137 if (rcu_access_pointer(ctx->vm)) 2138 args->value = rcu_dereference(ctx->vm)->total; 2139 else 2140 args->value = to_i915(dev)->ggtt.vm.total; 2141 rcu_read_unlock(); 2142 break; 2143 2144 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 2145 args->size = 0; 2146 args->value = i915_gem_context_no_error_capture(ctx); 2147 break; 2148 2149 case I915_CONTEXT_PARAM_BANNABLE: 2150 args->size = 0; 2151 args->value = i915_gem_context_is_bannable(ctx); 2152 break; 2153 2154 case I915_CONTEXT_PARAM_RECOVERABLE: 2155 args->size = 0; 2156 args->value = i915_gem_context_is_recoverable(ctx); 2157 break; 2158 2159 case I915_CONTEXT_PARAM_PRIORITY: 2160 args->size = 0; 2161 args->value = ctx->sched.priority; 2162 break; 2163 2164 case I915_CONTEXT_PARAM_SSEU: 2165 ret = get_sseu(ctx, args); 2166 break; 2167 2168 case I915_CONTEXT_PARAM_VM: 2169 ret = get_ppgtt(file_priv, ctx, args); 2170 break; 2171 2172 case I915_CONTEXT_PARAM_PERSISTENCE: 2173 args->size = 0; 2174 args->value = i915_gem_context_is_persistent(ctx); 2175 break; 2176 2177 case I915_CONTEXT_PARAM_NO_ZEROMAP: 2178 case 
I915_CONTEXT_PARAM_BAN_PERIOD: 2179 case I915_CONTEXT_PARAM_ENGINES: 2180 case I915_CONTEXT_PARAM_RINGSIZE: 2181 default: 2182 ret = -EINVAL; 2183 break; 2184 } 2185 2186 i915_gem_context_put(ctx); 2187 return ret; 2188 } 2189 2190 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 2191 struct drm_file *file) 2192 { 2193 struct drm_i915_file_private *file_priv = file->driver_priv; 2194 struct drm_i915_gem_context_param *args = data; 2195 struct i915_gem_proto_context *pc; 2196 struct i915_gem_context *ctx; 2197 int ret = 0; 2198 2199 mutex_lock(&file_priv->proto_context_lock); 2200 ctx = __context_lookup(file_priv, args->ctx_id); 2201 if (!ctx) { 2202 pc = xa_load(&file_priv->proto_context_xa, args->ctx_id); 2203 if (pc) { 2204 /* Contexts should be finalized inside 2205 * GEM_CONTEXT_CREATE starting with graphics 2206 * version 13. 2207 */ 2208 WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12); 2209 ret = set_proto_ctx_param(file_priv, pc, args); 2210 } else { 2211 ret = -ENOENT; 2212 } 2213 } 2214 mutex_unlock(&file_priv->proto_context_lock); 2215 2216 if (ctx) { 2217 ret = ctx_setparam(file_priv, ctx, args); 2218 i915_gem_context_put(ctx); 2219 } 2220 2221 return ret; 2222 } 2223 2224 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, 2225 void *data, struct drm_file *file) 2226 { 2227 struct drm_i915_private *i915 = to_i915(dev); 2228 struct drm_i915_reset_stats *args = data; 2229 struct i915_gem_context *ctx; 2230 2231 if (args->flags || args->pad) 2232 return -EINVAL; 2233 2234 ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id); 2235 if (IS_ERR(ctx)) 2236 return PTR_ERR(ctx); 2237 2238 /* 2239 * We opt for unserialised reads here. This may result in tearing 2240 * in the extremely unlikely event of a GPU hang on this context 2241 * as we are querying them. If we need that extra layer of protection, 2242 * we should wrap the hangstats with a seqlock. 2243 */ 2244 2245 if (capable(CAP_SYS_ADMIN)) 2246 args->reset_count = i915_reset_count(&i915->gpu_error); 2247 else 2248 args->reset_count = 0; 2249 2250 args->batch_active = atomic_read(&ctx->guilty_count); 2251 args->batch_pending = atomic_read(&ctx->active_count); 2252 2253 i915_gem_context_put(ctx); 2254 return 0; 2255 } 2256 2257 /* GEM context-engines iterator: for_each_gem_engine() */ 2258 struct intel_context * 2259 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) 2260 { 2261 const struct i915_gem_engines *e = it->engines; 2262 struct intel_context *ctx; 2263 2264 if (unlikely(!e)) 2265 return NULL; 2266 2267 do { 2268 if (it->idx >= e->num_engines) 2269 return NULL; 2270 2271 ctx = e->engines[it->idx++]; 2272 } while (!ctx); 2273 2274 return ctx; 2275 } 2276 2277 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2278 #include "selftests/mock_context.c" 2279 #include "selftests/i915_gem_context.c" 2280 #endif 2281 2282 void i915_gem_context_module_exit(void) 2283 { 2284 kmem_cache_destroy(slab_luts); 2285 } 2286 2287 int __init i915_gem_context_module_init(void) 2288 { 2289 slab_luts = KMEM_CACHE(i915_lut_handle, 0); 2290 if (!slab_luts) 2291 return -ENOMEM; 2292 2293 return 0; 2294 } 2295
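
/*
 * Purely illustrative, hedged sketch of the userspace side of the
 * CONTEXT_CREATE_EXT path handled by i915_gem_context_create_ioctl() and
 * create_setparam() above (uAPI names from include/uapi/drm/i915_drm.h;
 * "fd" and error handling are assumed): creating a context with a reduced
 * priority in a single ioctl by chaining a SETPARAM extension.
 *
 *	struct drm_i915_gem_context_create_ext_setparam p = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PRIORITY,
 *			.value = I915_CONTEXT_MIN_USER_PRIORITY / 2,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	... the new context is then referenced via create.ctx_id ...
 */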