/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU enters
 * and exits RC6 (the GPU has its own internal power context, except on gen5).
 * Though something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some context to load
 * as the current one, which in turn invokes a save of the context we actually
 * care about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created at the request of a GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                      refcount     pincount     active
 * S0: initial state                       0            0           0
 * S1: context created                     1            0           0
 * S2: context is currently running        2            1           X
 * S3: GPU referenced, but not current     2            0           1
 * S4: context is current, but destroyed   1            1           0
 * S5: like S3, but destroyed              1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits an execbuf with the context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: the context object was retired
 * S3->S2: the client submits another execbuf
 * S2->S4: context destroy is called on the current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on the current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context while it is still active.
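 *
 * As a concrete walk through the table above: a client that creates a
 * context, submits a single execbuf with it and then destroys it while it
 * is still the current context moves through S0->S1 (create), S1->S2
 * (execbuf) and S2->S4 (destroyed while current); once another context is
 * loaded and the old one is retired, it passes through S5 back to S0, at
 * which point the final reference is dropped and the BO may be freed.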
 *
 */

#include <linux/log2.h>
#include <linux/nospec.h>

#include <drm/i915_drm.h>

#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

static struct i915_global_gem_context {
	struct i915_global base;
	struct kmem_cache *slab_luts;
} global;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	kmem_cache_free(global.slab_luts, lut);
}

static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	lockdep_assert_held(&ctx->mutex);

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		rcu_read_unlock();
		i915_gem_object_lock(obj);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		i915_gem_object_unlock(obj);
		rcu_read_lock();

		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			if (atomic_dec_and_test(&vma->open_count) &&
			    !i915_vma_is_ggtt(vma))
				i915_vma_close(vma);
			i915_gem_object_put(obj);
		}

		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
}

static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}

static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		if (!e->engines[count])
			continue;

		intel_context_put(e->engines[count]);
	}
	kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}

static void free_engines_rcu(struct rcu_head *rcu)
{
	free_engines(container_of(rcu, struct i915_gem_engines, rcu));
}

static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
	const struct intel_gt *gt = &ctx->i915->gt;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e;
	enum intel_engine_id id;

	e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
	if (!e)
		return ERR_PTR(-ENOMEM);

	init_rcu_head(&e->rcu);
	for_each_engine(engine, gt, id) {
		struct
intel_context *ce; 207 208 if (engine->legacy_idx == INVALID_ENGINE) 209 continue; 210 211 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES); 212 GEM_BUG_ON(e->engines[engine->legacy_idx]); 213 214 ce = intel_context_create(ctx, engine); 215 if (IS_ERR(ce)) { 216 __free_engines(e, e->num_engines + 1); 217 return ERR_CAST(ce); 218 } 219 220 e->engines[engine->legacy_idx] = ce; 221 e->num_engines = max(e->num_engines, engine->legacy_idx); 222 } 223 e->num_engines++; 224 225 return e; 226 } 227 228 static void i915_gem_context_free(struct i915_gem_context *ctx) 229 { 230 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 231 232 spin_lock(&ctx->i915->gem.contexts.lock); 233 list_del(&ctx->link); 234 spin_unlock(&ctx->i915->gem.contexts.lock); 235 236 free_engines(rcu_access_pointer(ctx->engines)); 237 mutex_destroy(&ctx->engines_mutex); 238 239 if (ctx->timeline) 240 intel_timeline_put(ctx->timeline); 241 242 kfree(ctx->name); 243 put_pid(ctx->pid); 244 245 mutex_destroy(&ctx->mutex); 246 247 kfree_rcu(ctx, rcu); 248 } 249 250 static void contexts_free_all(struct llist_node *list) 251 { 252 struct i915_gem_context *ctx, *cn; 253 254 llist_for_each_entry_safe(ctx, cn, list, free_link) 255 i915_gem_context_free(ctx); 256 } 257 258 static void contexts_flush_free(struct i915_gem_contexts *gc) 259 { 260 contexts_free_all(llist_del_all(&gc->free_list)); 261 } 262 263 static void contexts_free_worker(struct work_struct *work) 264 { 265 struct i915_gem_contexts *gc = 266 container_of(work, typeof(*gc), free_work); 267 268 contexts_flush_free(gc); 269 } 270 271 void i915_gem_context_release(struct kref *ref) 272 { 273 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); 274 struct i915_gem_contexts *gc = &ctx->i915->gem.contexts; 275 276 trace_i915_context_free(ctx); 277 if (llist_add(&ctx->free_link, &gc->free_list)) 278 schedule_work(&gc->free_work); 279 } 280 281 static inline struct i915_gem_engines * 282 __context_engines_static(const struct i915_gem_context *ctx) 283 { 284 return rcu_dereference_protected(ctx->engines, true); 285 } 286 287 static bool __reset_engine(struct intel_engine_cs *engine) 288 { 289 struct intel_gt *gt = engine->gt; 290 bool success = false; 291 292 if (!intel_has_reset_engine(gt)) 293 return false; 294 295 if (!test_and_set_bit(I915_RESET_ENGINE + engine->id, 296 >->reset.flags)) { 297 success = intel_engine_reset(engine, NULL) == 0; 298 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, 299 >->reset.flags); 300 } 301 302 return success; 303 } 304 305 static void __reset_context(struct i915_gem_context *ctx, 306 struct intel_engine_cs *engine) 307 { 308 intel_gt_handle_error(engine->gt, engine->mask, 0, 309 "context closure in %s", ctx->name); 310 } 311 312 static bool __cancel_engine(struct intel_engine_cs *engine) 313 { 314 /* 315 * Send a "high priority pulse" down the engine to cause the 316 * current request to be momentarily preempted. (If it fails to 317 * be preempted, it will be reset). As we have marked our context 318 * as banned, any incomplete request, including any running, will 319 * be skipped following the preemption. 320 * 321 * If there is no hangchecking (one of the reasons why we try to 322 * cancel the context) and no forced preemption, there may be no 323 * means by which we reset the GPU and evict the persistent hog. 324 * Ergo if we are unable to inject a preemptive pulse that can 325 * kill the banned context, we fallback to doing a local reset 326 * instead. 
327 */ 328 if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) && 329 !intel_engine_pulse(engine)) 330 return true; 331 332 /* If we are unable to send a pulse, try resetting this engine. */ 333 return __reset_engine(engine); 334 } 335 336 static struct intel_engine_cs *__active_engine(struct i915_request *rq) 337 { 338 struct intel_engine_cs *engine, *locked; 339 340 /* 341 * Serialise with __i915_request_submit() so that it sees 342 * is-banned?, or we know the request is already inflight. 343 */ 344 locked = READ_ONCE(rq->engine); 345 spin_lock_irq(&locked->active.lock); 346 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { 347 spin_unlock(&locked->active.lock); 348 spin_lock(&engine->active.lock); 349 locked = engine; 350 } 351 352 engine = NULL; 353 if (i915_request_is_active(rq) && !rq->fence.error) 354 engine = rq->engine; 355 356 spin_unlock_irq(&locked->active.lock); 357 358 return engine; 359 } 360 361 static struct intel_engine_cs *active_engine(struct intel_context *ce) 362 { 363 struct intel_engine_cs *engine = NULL; 364 struct i915_request *rq; 365 366 if (!ce->timeline) 367 return NULL; 368 369 rcu_read_lock(); 370 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { 371 if (i915_request_completed(rq)) 372 break; 373 374 /* Check with the backend if the request is inflight */ 375 engine = __active_engine(rq); 376 if (engine) 377 break; 378 } 379 rcu_read_unlock(); 380 381 return engine; 382 } 383 384 static void kill_context(struct i915_gem_context *ctx) 385 { 386 struct i915_gem_engines_iter it; 387 struct intel_context *ce; 388 389 /* 390 * If we are already banned, it was due to a guilty request causing 391 * a reset and the entire context being evicted from the GPU. 392 */ 393 if (i915_gem_context_is_banned(ctx)) 394 return; 395 396 i915_gem_context_set_banned(ctx); 397 398 /* 399 * Map the user's engine back to the actual engines; one virtual 400 * engine will be mapped to multiple engines, and using ctx->engine[] 401 * the same engine may be have multiple instances in the user's map. 402 * However, we only care about pending requests, so only include 403 * engines on which there are incomplete requests. 404 */ 405 for_each_gem_engine(ce, __context_engines_static(ctx), it) { 406 struct intel_engine_cs *engine; 407 408 /* 409 * Check the current active state of this context; if we 410 * are currently executing on the GPU we need to evict 411 * ourselves. On the other hand, if we haven't yet been 412 * submitted to the GPU or if everything is complete, 413 * we have nothing to do. 414 */ 415 engine = active_engine(ce); 416 417 /* First attempt to gracefully cancel the context */ 418 if (engine && !__cancel_engine(engine)) 419 /* 420 * If we are unable to send a preemptive pulse to bump 421 * the context from the GPU, we have to resort to a full 422 * reset. We hope the collateral damage is worth it. 423 */ 424 __reset_context(ctx, engine); 425 } 426 } 427 428 static void context_close(struct i915_gem_context *ctx) 429 { 430 struct i915_address_space *vm; 431 432 i915_gem_context_set_closed(ctx); 433 434 mutex_lock(&ctx->mutex); 435 436 vm = i915_gem_context_vm(ctx); 437 if (vm) 438 i915_vm_close(vm); 439 440 ctx->file_priv = ERR_PTR(-EBADF); 441 442 /* 443 * The LUT uses the VMA as a backpointer to unref the object, 444 * so we need to clear the LUT before we close all the VMA (inside 445 * the ppgtt). 
446 */ 447 lut_close(ctx); 448 449 mutex_unlock(&ctx->mutex); 450 451 /* 452 * If the user has disabled hangchecking, we can not be sure that 453 * the batches will ever complete after the context is closed, 454 * keeping the context and all resources pinned forever. So in this 455 * case we opt to forcibly kill off all remaining requests on 456 * context close. 457 */ 458 if (!i915_gem_context_is_persistent(ctx) || 459 !i915_modparams.enable_hangcheck) 460 kill_context(ctx); 461 462 i915_gem_context_put(ctx); 463 } 464 465 static int __context_set_persistence(struct i915_gem_context *ctx, bool state) 466 { 467 if (i915_gem_context_is_persistent(ctx) == state) 468 return 0; 469 470 if (state) { 471 /* 472 * Only contexts that are short-lived [that will expire or be 473 * reset] are allowed to survive past termination. We require 474 * hangcheck to ensure that the persistent requests are healthy. 475 */ 476 if (!i915_modparams.enable_hangcheck) 477 return -EINVAL; 478 479 i915_gem_context_set_persistence(ctx); 480 } else { 481 /* To cancel a context we use "preempt-to-idle" */ 482 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) 483 return -ENODEV; 484 485 i915_gem_context_clear_persistence(ctx); 486 } 487 488 return 0; 489 } 490 491 static struct i915_gem_context * 492 __create_context(struct drm_i915_private *i915) 493 { 494 struct i915_gem_context *ctx; 495 struct i915_gem_engines *e; 496 int err; 497 int i; 498 499 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 500 if (!ctx) 501 return ERR_PTR(-ENOMEM); 502 503 kref_init(&ctx->ref); 504 ctx->i915 = i915; 505 ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); 506 mutex_init(&ctx->mutex); 507 508 mutex_init(&ctx->engines_mutex); 509 e = default_engines(ctx); 510 if (IS_ERR(e)) { 511 err = PTR_ERR(e); 512 goto err_free; 513 } 514 RCU_INIT_POINTER(ctx->engines, e); 515 516 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); 517 518 /* NB: Mark all slices as needing a remap so that when the context first 519 * loads it will restore whatever remap state already exists. If there 520 * is no remap info, it will be a NOP. */ 521 ctx->remap_slice = ALL_L3_SLICES(i915); 522 523 i915_gem_context_set_bannable(ctx); 524 i915_gem_context_set_recoverable(ctx); 525 __context_set_persistence(ctx, true /* cgroup hook? 
*/); 526 527 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) 528 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; 529 530 spin_lock(&i915->gem.contexts.lock); 531 list_add_tail(&ctx->link, &i915->gem.contexts.list); 532 spin_unlock(&i915->gem.contexts.lock); 533 534 return ctx; 535 536 err_free: 537 kfree(ctx); 538 return ERR_PTR(err); 539 } 540 541 static void 542 context_apply_all(struct i915_gem_context *ctx, 543 void (*fn)(struct intel_context *ce, void *data), 544 void *data) 545 { 546 struct i915_gem_engines_iter it; 547 struct intel_context *ce; 548 549 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) 550 fn(ce, data); 551 i915_gem_context_unlock_engines(ctx); 552 } 553 554 static void __apply_ppgtt(struct intel_context *ce, void *vm) 555 { 556 i915_vm_put(ce->vm); 557 ce->vm = i915_vm_get(vm); 558 } 559 560 static struct i915_address_space * 561 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) 562 { 563 struct i915_address_space *old = i915_gem_context_vm(ctx); 564 565 GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old)); 566 567 rcu_assign_pointer(ctx->vm, i915_vm_open(vm)); 568 context_apply_all(ctx, __apply_ppgtt, vm); 569 570 return old; 571 } 572 573 static void __assign_ppgtt(struct i915_gem_context *ctx, 574 struct i915_address_space *vm) 575 { 576 if (vm == rcu_access_pointer(ctx->vm)) 577 return; 578 579 vm = __set_ppgtt(ctx, vm); 580 if (vm) 581 i915_vm_close(vm); 582 } 583 584 static void __set_timeline(struct intel_timeline **dst, 585 struct intel_timeline *src) 586 { 587 struct intel_timeline *old = *dst; 588 589 *dst = src ? intel_timeline_get(src) : NULL; 590 591 if (old) 592 intel_timeline_put(old); 593 } 594 595 static void __apply_timeline(struct intel_context *ce, void *timeline) 596 { 597 __set_timeline(&ce->timeline, timeline); 598 } 599 600 static void __assign_timeline(struct i915_gem_context *ctx, 601 struct intel_timeline *timeline) 602 { 603 __set_timeline(&ctx->timeline, timeline); 604 context_apply_all(ctx, __apply_timeline, timeline); 605 } 606 607 static struct i915_gem_context * 608 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) 609 { 610 struct i915_gem_context *ctx; 611 612 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && 613 !HAS_EXECLISTS(i915)) 614 return ERR_PTR(-EINVAL); 615 616 /* Reap the stale contexts */ 617 contexts_flush_free(&i915->gem.contexts); 618 619 ctx = __create_context(i915); 620 if (IS_ERR(ctx)) 621 return ctx; 622 623 if (HAS_FULL_PPGTT(i915)) { 624 struct i915_ppgtt *ppgtt; 625 626 ppgtt = i915_ppgtt_create(i915); 627 if (IS_ERR(ppgtt)) { 628 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", 629 PTR_ERR(ppgtt)); 630 context_close(ctx); 631 return ERR_CAST(ppgtt); 632 } 633 634 mutex_lock(&ctx->mutex); 635 __assign_ppgtt(ctx, &ppgtt->vm); 636 mutex_unlock(&ctx->mutex); 637 638 i915_vm_put(&ppgtt->vm); 639 } 640 641 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { 642 struct intel_timeline *timeline; 643 644 timeline = intel_timeline_create(&i915->gt, NULL); 645 if (IS_ERR(timeline)) { 646 context_close(ctx); 647 return ERR_CAST(timeline); 648 } 649 650 __assign_timeline(ctx, timeline); 651 intel_timeline_put(timeline); 652 } 653 654 trace_i915_context_create(ctx); 655 656 return ctx; 657 } 658 659 static void 660 destroy_kernel_context(struct i915_gem_context **ctxp) 661 { 662 struct i915_gem_context *ctx; 663 664 /* Keep the context ref so that we can free it immediately ourselves */ 665 ctx = 
i915_gem_context_get(fetch_and_zero(ctxp)); 666 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); 667 668 context_close(ctx); 669 i915_gem_context_free(ctx); 670 } 671 672 struct i915_gem_context * 673 i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) 674 { 675 struct i915_gem_context *ctx; 676 677 ctx = i915_gem_create_context(i915, 0); 678 if (IS_ERR(ctx)) 679 return ctx; 680 681 i915_gem_context_clear_bannable(ctx); 682 i915_gem_context_set_persistence(ctx); 683 ctx->sched.priority = I915_USER_PRIORITY(prio); 684 685 GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); 686 687 return ctx; 688 } 689 690 static void init_contexts(struct i915_gem_contexts *gc) 691 { 692 spin_lock_init(&gc->lock); 693 INIT_LIST_HEAD(&gc->list); 694 695 INIT_WORK(&gc->free_work, contexts_free_worker); 696 init_llist_head(&gc->free_list); 697 } 698 699 int i915_gem_init_contexts(struct drm_i915_private *i915) 700 { 701 struct i915_gem_context *ctx; 702 703 /* Reassure ourselves we are only called once */ 704 GEM_BUG_ON(i915->kernel_context); 705 706 init_contexts(&i915->gem.contexts); 707 708 /* lowest priority; idle task */ 709 ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN); 710 if (IS_ERR(ctx)) { 711 DRM_ERROR("Failed to create default global context\n"); 712 return PTR_ERR(ctx); 713 } 714 i915->kernel_context = ctx; 715 716 DRM_DEBUG_DRIVER("%s context support initialized\n", 717 DRIVER_CAPS(i915)->has_logical_contexts ? 718 "logical" : "fake"); 719 return 0; 720 } 721 722 void i915_gem_driver_release__contexts(struct drm_i915_private *i915) 723 { 724 destroy_kernel_context(&i915->kernel_context); 725 } 726 727 static int context_idr_cleanup(int id, void *p, void *data) 728 { 729 context_close(p); 730 return 0; 731 } 732 733 static int vm_idr_cleanup(int id, void *p, void *data) 734 { 735 i915_vm_put(p); 736 return 0; 737 } 738 739 static int gem_context_register(struct i915_gem_context *ctx, 740 struct drm_i915_file_private *fpriv) 741 { 742 struct i915_address_space *vm; 743 int ret; 744 745 ctx->file_priv = fpriv; 746 747 mutex_lock(&ctx->mutex); 748 vm = i915_gem_context_vm(ctx); 749 if (vm) 750 WRITE_ONCE(vm->file, fpriv); /* XXX */ 751 mutex_unlock(&ctx->mutex); 752 753 ctx->pid = get_task_pid(current, PIDTYPE_PID); 754 ctx->name = kasprintf(GFP_KERNEL, "%s[%d]", 755 current->comm, pid_nr(ctx->pid)); 756 if (!ctx->name) { 757 ret = -ENOMEM; 758 goto err_pid; 759 } 760 761 /* And finally expose ourselves to userspace via the idr */ 762 mutex_lock(&fpriv->context_idr_lock); 763 ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL); 764 mutex_unlock(&fpriv->context_idr_lock); 765 if (ret >= 0) 766 goto out; 767 768 kfree(fetch_and_zero(&ctx->name)); 769 err_pid: 770 put_pid(fetch_and_zero(&ctx->pid)); 771 out: 772 return ret; 773 } 774 775 int i915_gem_context_open(struct drm_i915_private *i915, 776 struct drm_file *file) 777 { 778 struct drm_i915_file_private *file_priv = file->driver_priv; 779 struct i915_gem_context *ctx; 780 int err; 781 782 mutex_init(&file_priv->context_idr_lock); 783 mutex_init(&file_priv->vm_idr_lock); 784 785 idr_init(&file_priv->context_idr); 786 idr_init_base(&file_priv->vm_idr, 1); 787 788 ctx = i915_gem_create_context(i915, 0); 789 if (IS_ERR(ctx)) { 790 err = PTR_ERR(ctx); 791 goto err; 792 } 793 794 err = gem_context_register(ctx, file_priv); 795 if (err < 0) 796 goto err_ctx; 797 798 GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); 799 GEM_BUG_ON(err > 0); 800 801 return 0; 802 803 err_ctx: 804 context_close(ctx); 805 err: 806 
idr_destroy(&file_priv->vm_idr); 807 idr_destroy(&file_priv->context_idr); 808 mutex_destroy(&file_priv->vm_idr_lock); 809 mutex_destroy(&file_priv->context_idr_lock); 810 return err; 811 } 812 813 void i915_gem_context_close(struct drm_file *file) 814 { 815 struct drm_i915_file_private *file_priv = file->driver_priv; 816 struct drm_i915_private *i915 = file_priv->dev_priv; 817 818 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); 819 idr_destroy(&file_priv->context_idr); 820 mutex_destroy(&file_priv->context_idr_lock); 821 822 idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL); 823 idr_destroy(&file_priv->vm_idr); 824 mutex_destroy(&file_priv->vm_idr_lock); 825 826 contexts_flush_free(&i915->gem.contexts); 827 } 828 829 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, 830 struct drm_file *file) 831 { 832 struct drm_i915_private *i915 = to_i915(dev); 833 struct drm_i915_gem_vm_control *args = data; 834 struct drm_i915_file_private *file_priv = file->driver_priv; 835 struct i915_ppgtt *ppgtt; 836 int err; 837 838 if (!HAS_FULL_PPGTT(i915)) 839 return -ENODEV; 840 841 if (args->flags) 842 return -EINVAL; 843 844 ppgtt = i915_ppgtt_create(i915); 845 if (IS_ERR(ppgtt)) 846 return PTR_ERR(ppgtt); 847 848 ppgtt->vm.file = file_priv; 849 850 if (args->extensions) { 851 err = i915_user_extensions(u64_to_user_ptr(args->extensions), 852 NULL, 0, 853 ppgtt); 854 if (err) 855 goto err_put; 856 } 857 858 err = mutex_lock_interruptible(&file_priv->vm_idr_lock); 859 if (err) 860 goto err_put; 861 862 err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL); 863 if (err < 0) 864 goto err_unlock; 865 866 GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */ 867 868 mutex_unlock(&file_priv->vm_idr_lock); 869 870 args->vm_id = err; 871 return 0; 872 873 err_unlock: 874 mutex_unlock(&file_priv->vm_idr_lock); 875 err_put: 876 i915_vm_put(&ppgtt->vm); 877 return err; 878 } 879 880 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, 881 struct drm_file *file) 882 { 883 struct drm_i915_file_private *file_priv = file->driver_priv; 884 struct drm_i915_gem_vm_control *args = data; 885 struct i915_address_space *vm; 886 int err; 887 u32 id; 888 889 if (args->flags) 890 return -EINVAL; 891 892 if (args->extensions) 893 return -EINVAL; 894 895 id = args->vm_id; 896 if (!id) 897 return -ENOENT; 898 899 err = mutex_lock_interruptible(&file_priv->vm_idr_lock); 900 if (err) 901 return err; 902 903 vm = idr_remove(&file_priv->vm_idr, id); 904 905 mutex_unlock(&file_priv->vm_idr_lock); 906 if (!vm) 907 return -ENOENT; 908 909 i915_vm_put(vm); 910 return 0; 911 } 912 913 struct context_barrier_task { 914 struct i915_active base; 915 void (*task)(void *data); 916 void *data; 917 }; 918 919 __i915_active_call 920 static void cb_retire(struct i915_active *base) 921 { 922 struct context_barrier_task *cb = container_of(base, typeof(*cb), base); 923 924 if (cb->task) 925 cb->task(cb->data); 926 927 i915_active_fini(&cb->base); 928 kfree(cb); 929 } 930 931 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); 932 static int context_barrier_task(struct i915_gem_context *ctx, 933 intel_engine_mask_t engines, 934 bool (*skip)(struct intel_context *ce, void *data), 935 int (*emit)(struct i915_request *rq, void *data), 936 void (*task)(void *data), 937 void *data) 938 { 939 struct context_barrier_task *cb; 940 struct i915_gem_engines_iter it; 941 struct intel_context *ce; 942 int err = 0; 943 944 GEM_BUG_ON(!task); 945 946 cb = 
kmalloc(sizeof(*cb), GFP_KERNEL); 947 if (!cb) 948 return -ENOMEM; 949 950 i915_active_init(&cb->base, NULL, cb_retire); 951 err = i915_active_acquire(&cb->base); 952 if (err) { 953 kfree(cb); 954 return err; 955 } 956 957 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 958 struct i915_request *rq; 959 960 if (I915_SELFTEST_ONLY(context_barrier_inject_fault & 961 ce->engine->mask)) { 962 err = -ENXIO; 963 break; 964 } 965 966 if (!(ce->engine->mask & engines)) 967 continue; 968 969 if (skip && skip(ce, data)) 970 continue; 971 972 rq = intel_context_create_request(ce); 973 if (IS_ERR(rq)) { 974 err = PTR_ERR(rq); 975 break; 976 } 977 978 err = 0; 979 if (emit) 980 err = emit(rq, data); 981 if (err == 0) 982 err = i915_active_add_request(&cb->base, rq); 983 984 i915_request_add(rq); 985 if (err) 986 break; 987 } 988 i915_gem_context_unlock_engines(ctx); 989 990 cb->task = err ? NULL : task; /* caller needs to unwind instead */ 991 cb->data = data; 992 993 i915_active_release(&cb->base); 994 995 return err; 996 } 997 998 static int get_ppgtt(struct drm_i915_file_private *file_priv, 999 struct i915_gem_context *ctx, 1000 struct drm_i915_gem_context_param *args) 1001 { 1002 struct i915_address_space *vm; 1003 int ret; 1004 1005 if (!rcu_access_pointer(ctx->vm)) 1006 return -ENODEV; 1007 1008 rcu_read_lock(); 1009 vm = i915_vm_get(ctx->vm); 1010 rcu_read_unlock(); 1011 1012 ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); 1013 if (ret) 1014 goto err_put; 1015 1016 ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL); 1017 GEM_BUG_ON(!ret); 1018 if (ret < 0) 1019 goto err_unlock; 1020 1021 i915_vm_open(vm); 1022 1023 args->size = 0; 1024 args->value = ret; 1025 1026 ret = 0; 1027 err_unlock: 1028 mutex_unlock(&file_priv->vm_idr_lock); 1029 err_put: 1030 i915_vm_put(vm); 1031 return ret; 1032 } 1033 1034 static void set_ppgtt_barrier(void *data) 1035 { 1036 struct i915_address_space *old = data; 1037 1038 if (INTEL_GEN(old->i915) < 8) 1039 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); 1040 1041 i915_vm_close(old); 1042 } 1043 1044 static int emit_ppgtt_update(struct i915_request *rq, void *data) 1045 { 1046 struct i915_address_space *vm = rq->hw_context->vm; 1047 struct intel_engine_cs *engine = rq->engine; 1048 u32 base = engine->mmio_base; 1049 u32 *cs; 1050 int i; 1051 1052 if (i915_vm_is_4lvl(vm)) { 1053 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1054 const dma_addr_t pd_daddr = px_dma(ppgtt->pd); 1055 1056 cs = intel_ring_begin(rq, 6); 1057 if (IS_ERR(cs)) 1058 return PTR_ERR(cs); 1059 1060 *cs++ = MI_LOAD_REGISTER_IMM(2); 1061 1062 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); 1063 *cs++ = upper_32_bits(pd_daddr); 1064 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); 1065 *cs++ = lower_32_bits(pd_daddr); 1066 1067 *cs++ = MI_NOOP; 1068 intel_ring_advance(rq, cs); 1069 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { 1070 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1071 int err; 1072 1073 /* Magic required to prevent forcewake errors! 
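 * The EMIT_INVALIDATE flush requested below is emitted first, and is then
 * followed by an MI_LOAD_REGISTER_IMM (tagged MI_LRI_FORCE_POSTED) that
 * rewrites all GEN8_3LVL_PDPES PDP register pairs, so that later commands
 * in this request use the new page directories.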
*/ 1074 err = engine->emit_flush(rq, EMIT_INVALIDATE); 1075 if (err) 1076 return err; 1077 1078 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); 1079 if (IS_ERR(cs)) 1080 return PTR_ERR(cs); 1081 1082 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; 1083 for (i = GEN8_3LVL_PDPES; i--; ) { 1084 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 1085 1086 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); 1087 *cs++ = upper_32_bits(pd_daddr); 1088 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); 1089 *cs++ = lower_32_bits(pd_daddr); 1090 } 1091 *cs++ = MI_NOOP; 1092 intel_ring_advance(rq, cs); 1093 } else { 1094 /* ppGTT is not part of the legacy context image */ 1095 gen6_ppgtt_pin(i915_vm_to_ppgtt(vm)); 1096 } 1097 1098 return 0; 1099 } 1100 1101 static bool skip_ppgtt_update(struct intel_context *ce, void *data) 1102 { 1103 if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) 1104 return !ce->state; 1105 else 1106 return !atomic_read(&ce->pin_count); 1107 } 1108 1109 static int set_ppgtt(struct drm_i915_file_private *file_priv, 1110 struct i915_gem_context *ctx, 1111 struct drm_i915_gem_context_param *args) 1112 { 1113 struct i915_address_space *vm, *old; 1114 int err; 1115 1116 if (args->size) 1117 return -EINVAL; 1118 1119 if (!rcu_access_pointer(ctx->vm)) 1120 return -ENODEV; 1121 1122 if (upper_32_bits(args->value)) 1123 return -ENOENT; 1124 1125 rcu_read_lock(); 1126 vm = idr_find(&file_priv->vm_idr, args->value); 1127 if (vm && !kref_get_unless_zero(&vm->ref)) 1128 vm = NULL; 1129 rcu_read_unlock(); 1130 if (!vm) 1131 return -ENOENT; 1132 1133 err = mutex_lock_interruptible(&ctx->mutex); 1134 if (err) 1135 goto out; 1136 1137 if (i915_gem_context_is_closed(ctx)) { 1138 err = -ENOENT; 1139 goto out; 1140 } 1141 1142 if (vm == rcu_access_pointer(ctx->vm)) 1143 goto unlock; 1144 1145 /* Teardown the existing obj:vma cache, it will have to be rebuilt. */ 1146 lut_close(ctx); 1147 1148 old = __set_ppgtt(ctx, vm); 1149 1150 /* 1151 * We need to flush any requests using the current ppgtt before 1152 * we release it as the requests do not hold a reference themselves, 1153 * only indirectly through the context. 
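 * That is what the context_barrier_task() call below arranges: a request
 * running emit_ppgtt_update() is queued on each engine of this context
 * (unless skip_ppgtt_update() sees the engine's context image has never
 * been created or pinned), and once those requests complete,
 * set_ppgtt_barrier() closes the old address space.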
1154 */ 1155 err = context_barrier_task(ctx, ALL_ENGINES, 1156 skip_ppgtt_update, 1157 emit_ppgtt_update, 1158 set_ppgtt_barrier, 1159 old); 1160 if (err) { 1161 i915_vm_close(__set_ppgtt(ctx, old)); 1162 i915_vm_close(old); 1163 } 1164 1165 unlock: 1166 mutex_unlock(&ctx->mutex); 1167 out: 1168 i915_vm_put(vm); 1169 return err; 1170 } 1171 1172 static int gen8_emit_rpcs_config(struct i915_request *rq, 1173 struct intel_context *ce, 1174 struct intel_sseu sseu) 1175 { 1176 u64 offset; 1177 u32 *cs; 1178 1179 cs = intel_ring_begin(rq, 4); 1180 if (IS_ERR(cs)) 1181 return PTR_ERR(cs); 1182 1183 offset = i915_ggtt_offset(ce->state) + 1184 LRC_STATE_PN * PAGE_SIZE + 1185 CTX_R_PWR_CLK_STATE * 4; 1186 1187 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 1188 *cs++ = lower_32_bits(offset); 1189 *cs++ = upper_32_bits(offset); 1190 *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu); 1191 1192 intel_ring_advance(rq, cs); 1193 1194 return 0; 1195 } 1196 1197 static int 1198 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) 1199 { 1200 struct i915_request *rq; 1201 int ret; 1202 1203 lockdep_assert_held(&ce->pin_mutex); 1204 1205 /* 1206 * If the context is not idle, we have to submit an ordered request to 1207 * modify its context image via the kernel context (writing to our own 1208 * image, or into the registers directory, does not stick). Pristine 1209 * and idle contexts will be configured on pinning. 1210 */ 1211 if (!intel_context_is_pinned(ce)) 1212 return 0; 1213 1214 rq = i915_request_create(ce->engine->kernel_context); 1215 if (IS_ERR(rq)) 1216 return PTR_ERR(rq); 1217 1218 /* Serialise with the remote context */ 1219 ret = intel_context_prepare_remote_request(ce, rq); 1220 if (ret == 0) 1221 ret = gen8_emit_rpcs_config(rq, ce, sseu); 1222 1223 i915_request_add(rq); 1224 return ret; 1225 } 1226 1227 static int 1228 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu) 1229 { 1230 int ret; 1231 1232 GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8); 1233 1234 ret = intel_context_lock_pinned(ce); 1235 if (ret) 1236 return ret; 1237 1238 /* Nothing to do if unmodified. */ 1239 if (!memcmp(&ce->sseu, &sseu, sizeof(sseu))) 1240 goto unlock; 1241 1242 ret = gen8_modify_rpcs(ce, sseu); 1243 if (!ret) 1244 ce->sseu = sseu; 1245 1246 unlock: 1247 intel_context_unlock_pinned(ce); 1248 return ret; 1249 } 1250 1251 static int 1252 user_to_context_sseu(struct drm_i915_private *i915, 1253 const struct drm_i915_gem_context_param_sseu *user, 1254 struct intel_sseu *context) 1255 { 1256 const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu; 1257 1258 /* No zeros in any field. */ 1259 if (!user->slice_mask || !user->subslice_mask || 1260 !user->min_eus_per_subslice || !user->max_eus_per_subslice) 1261 return -EINVAL; 1262 1263 /* Max > min. */ 1264 if (user->max_eus_per_subslice < user->min_eus_per_subslice) 1265 return -EINVAL; 1266 1267 /* 1268 * Some future proofing on the types since the uAPI is wider than the 1269 * current internal implementation. 1270 */ 1271 if (overflows_type(user->slice_mask, context->slice_mask) || 1272 overflows_type(user->subslice_mask, context->subslice_mask) || 1273 overflows_type(user->min_eus_per_subslice, 1274 context->min_eus_per_subslice) || 1275 overflows_type(user->max_eus_per_subslice, 1276 context->max_eus_per_subslice)) 1277 return -EINVAL; 1278 1279 /* Check validity against hardware. 
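 * (Userspace can discover the device slice/subslice/EU masks that we
 * compare against here via the DRM_I915_QUERY_TOPOLOGY_INFO query.)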
*/ 1280 if (user->slice_mask & ~device->slice_mask) 1281 return -EINVAL; 1282 1283 if (user->subslice_mask & ~device->subslice_mask[0]) 1284 return -EINVAL; 1285 1286 if (user->max_eus_per_subslice > device->max_eus_per_subslice) 1287 return -EINVAL; 1288 1289 context->slice_mask = user->slice_mask; 1290 context->subslice_mask = user->subslice_mask; 1291 context->min_eus_per_subslice = user->min_eus_per_subslice; 1292 context->max_eus_per_subslice = user->max_eus_per_subslice; 1293 1294 /* Part specific restrictions. */ 1295 if (IS_GEN(i915, 11)) { 1296 unsigned int hw_s = hweight8(device->slice_mask); 1297 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); 1298 unsigned int req_s = hweight8(context->slice_mask); 1299 unsigned int req_ss = hweight8(context->subslice_mask); 1300 1301 /* 1302 * Only full subslice enablement is possible if more than one 1303 * slice is turned on. 1304 */ 1305 if (req_s > 1 && req_ss != hw_ss_per_s) 1306 return -EINVAL; 1307 1308 /* 1309 * If more than four (SScount bitfield limit) subslices are 1310 * requested then the number has to be even. 1311 */ 1312 if (req_ss > 4 && (req_ss & 1)) 1313 return -EINVAL; 1314 1315 /* 1316 * If only one slice is enabled and subslice count is below the 1317 * device full enablement, it must be at most half of the all 1318 * available subslices. 1319 */ 1320 if (req_s == 1 && req_ss < hw_ss_per_s && 1321 req_ss > (hw_ss_per_s / 2)) 1322 return -EINVAL; 1323 1324 /* ABI restriction - VME use case only. */ 1325 1326 /* All slices or one slice only. */ 1327 if (req_s != 1 && req_s != hw_s) 1328 return -EINVAL; 1329 1330 /* 1331 * Half subslices or full enablement only when one slice is 1332 * enabled. 1333 */ 1334 if (req_s == 1 && 1335 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) 1336 return -EINVAL; 1337 1338 /* No EU configuration changes. */ 1339 if ((user->min_eus_per_subslice != 1340 device->max_eus_per_subslice) || 1341 (user->max_eus_per_subslice != 1342 device->max_eus_per_subslice)) 1343 return -EINVAL; 1344 } 1345 1346 return 0; 1347 } 1348 1349 static int set_sseu(struct i915_gem_context *ctx, 1350 struct drm_i915_gem_context_param *args) 1351 { 1352 struct drm_i915_private *i915 = ctx->i915; 1353 struct drm_i915_gem_context_param_sseu user_sseu; 1354 struct intel_context *ce; 1355 struct intel_sseu sseu; 1356 unsigned long lookup; 1357 int ret; 1358 1359 if (args->size < sizeof(user_sseu)) 1360 return -EINVAL; 1361 1362 if (!IS_GEN(i915, 11)) 1363 return -ENODEV; 1364 1365 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 1366 sizeof(user_sseu))) 1367 return -EFAULT; 1368 1369 if (user_sseu.rsvd) 1370 return -EINVAL; 1371 1372 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 1373 return -EINVAL; 1374 1375 lookup = 0; 1376 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 1377 lookup |= LOOKUP_USER_INDEX; 1378 1379 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 1380 if (IS_ERR(ce)) 1381 return PTR_ERR(ce); 1382 1383 /* Only render engine supports RPCS configuration. 
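 * As a reference for the uAPI handled here, userspace supplies something
 * like the following (an illustrative sketch only; the mask and EU values
 * are placeholders that must match the device topology, and "fd"/"ctx_id"
 * are assumed to exist):
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *		.slice_mask = 0x1,
 *		.subslice_mask = 0xf,
 *		.min_eus_per_subslice = 8,
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);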
*/ 1384 if (ce->engine->class != RENDER_CLASS) { 1385 ret = -ENODEV; 1386 goto out_ce; 1387 } 1388 1389 ret = user_to_context_sseu(i915, &user_sseu, &sseu); 1390 if (ret) 1391 goto out_ce; 1392 1393 ret = intel_context_reconfigure_sseu(ce, sseu); 1394 if (ret) 1395 goto out_ce; 1396 1397 args->size = sizeof(user_sseu); 1398 1399 out_ce: 1400 intel_context_put(ce); 1401 return ret; 1402 } 1403 1404 struct set_engines { 1405 struct i915_gem_context *ctx; 1406 struct i915_gem_engines *engines; 1407 }; 1408 1409 static int 1410 set_engines__load_balance(struct i915_user_extension __user *base, void *data) 1411 { 1412 struct i915_context_engines_load_balance __user *ext = 1413 container_of_user(base, typeof(*ext), base); 1414 const struct set_engines *set = data; 1415 struct intel_engine_cs *stack[16]; 1416 struct intel_engine_cs **siblings; 1417 struct intel_context *ce; 1418 u16 num_siblings, idx; 1419 unsigned int n; 1420 int err; 1421 1422 if (!HAS_EXECLISTS(set->ctx->i915)) 1423 return -ENODEV; 1424 1425 if (USES_GUC_SUBMISSION(set->ctx->i915)) 1426 return -ENODEV; /* not implement yet */ 1427 1428 if (get_user(idx, &ext->engine_index)) 1429 return -EFAULT; 1430 1431 if (idx >= set->engines->num_engines) { 1432 DRM_DEBUG("Invalid placement value, %d >= %d\n", 1433 idx, set->engines->num_engines); 1434 return -EINVAL; 1435 } 1436 1437 idx = array_index_nospec(idx, set->engines->num_engines); 1438 if (set->engines->engines[idx]) { 1439 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx); 1440 return -EEXIST; 1441 } 1442 1443 if (get_user(num_siblings, &ext->num_siblings)) 1444 return -EFAULT; 1445 1446 err = check_user_mbz(&ext->flags); 1447 if (err) 1448 return err; 1449 1450 err = check_user_mbz(&ext->mbz64); 1451 if (err) 1452 return err; 1453 1454 siblings = stack; 1455 if (num_siblings > ARRAY_SIZE(stack)) { 1456 siblings = kmalloc_array(num_siblings, 1457 sizeof(*siblings), 1458 GFP_KERNEL); 1459 if (!siblings) 1460 return -ENOMEM; 1461 } 1462 1463 for (n = 0; n < num_siblings; n++) { 1464 struct i915_engine_class_instance ci; 1465 1466 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { 1467 err = -EFAULT; 1468 goto out_siblings; 1469 } 1470 1471 siblings[n] = intel_engine_lookup_user(set->ctx->i915, 1472 ci.engine_class, 1473 ci.engine_instance); 1474 if (!siblings[n]) { 1475 DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n", 1476 n, ci.engine_class, ci.engine_instance); 1477 err = -EINVAL; 1478 goto out_siblings; 1479 } 1480 } 1481 1482 ce = intel_execlists_create_virtual(set->ctx, siblings, n); 1483 if (IS_ERR(ce)) { 1484 err = PTR_ERR(ce); 1485 goto out_siblings; 1486 } 1487 1488 if (cmpxchg(&set->engines->engines[idx], NULL, ce)) { 1489 intel_context_put(ce); 1490 err = -EEXIST; 1491 goto out_siblings; 1492 } 1493 1494 out_siblings: 1495 if (siblings != stack) 1496 kfree(siblings); 1497 1498 return err; 1499 } 1500 1501 static int 1502 set_engines__bond(struct i915_user_extension __user *base, void *data) 1503 { 1504 struct i915_context_engines_bond __user *ext = 1505 container_of_user(base, typeof(*ext), base); 1506 const struct set_engines *set = data; 1507 struct i915_engine_class_instance ci; 1508 struct intel_engine_cs *virtual; 1509 struct intel_engine_cs *master; 1510 u16 idx, num_bonds; 1511 int err, n; 1512 1513 if (get_user(idx, &ext->virtual_index)) 1514 return -EFAULT; 1515 1516 if (idx >= set->engines->num_engines) { 1517 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n", 1518 idx, set->engines->num_engines); 1519 return -EINVAL; 1520 } 
1521 1522 idx = array_index_nospec(idx, set->engines->num_engines); 1523 if (!set->engines->engines[idx]) { 1524 DRM_DEBUG("Invalid engine at %d\n", idx); 1525 return -EINVAL; 1526 } 1527 virtual = set->engines->engines[idx]->engine; 1528 1529 err = check_user_mbz(&ext->flags); 1530 if (err) 1531 return err; 1532 1533 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { 1534 err = check_user_mbz(&ext->mbz64[n]); 1535 if (err) 1536 return err; 1537 } 1538 1539 if (copy_from_user(&ci, &ext->master, sizeof(ci))) 1540 return -EFAULT; 1541 1542 master = intel_engine_lookup_user(set->ctx->i915, 1543 ci.engine_class, ci.engine_instance); 1544 if (!master) { 1545 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n", 1546 ci.engine_class, ci.engine_instance); 1547 return -EINVAL; 1548 } 1549 1550 if (get_user(num_bonds, &ext->num_bonds)) 1551 return -EFAULT; 1552 1553 for (n = 0; n < num_bonds; n++) { 1554 struct intel_engine_cs *bond; 1555 1556 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) 1557 return -EFAULT; 1558 1559 bond = intel_engine_lookup_user(set->ctx->i915, 1560 ci.engine_class, 1561 ci.engine_instance); 1562 if (!bond) { 1563 DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", 1564 n, ci.engine_class, ci.engine_instance); 1565 return -EINVAL; 1566 } 1567 1568 /* 1569 * A non-virtual engine has no siblings to choose between; and 1570 * a submit fence will always be directed to the one engine. 1571 */ 1572 if (intel_engine_is_virtual(virtual)) { 1573 err = intel_virtual_engine_attach_bond(virtual, 1574 master, 1575 bond); 1576 if (err) 1577 return err; 1578 } 1579 } 1580 1581 return 0; 1582 } 1583 1584 static const i915_user_extension_fn set_engines__extensions[] = { 1585 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance, 1586 [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond, 1587 }; 1588 1589 static int 1590 set_engines(struct i915_gem_context *ctx, 1591 const struct drm_i915_gem_context_param *args) 1592 { 1593 struct i915_context_param_engines __user *user = 1594 u64_to_user_ptr(args->value); 1595 struct set_engines set = { .ctx = ctx }; 1596 unsigned int num_engines, n; 1597 u64 extensions; 1598 int err; 1599 1600 if (!args->size) { /* switch back to legacy user_ring_map */ 1601 if (!i915_gem_context_user_engines(ctx)) 1602 return 0; 1603 1604 set.engines = default_engines(ctx); 1605 if (IS_ERR(set.engines)) 1606 return PTR_ERR(set.engines); 1607 1608 goto replace; 1609 } 1610 1611 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines))); 1612 if (args->size < sizeof(*user) || 1613 !IS_ALIGNED(args->size, sizeof(*user->engines))) { 1614 DRM_DEBUG("Invalid size for engine array: %d\n", 1615 args->size); 1616 return -EINVAL; 1617 } 1618 1619 /* 1620 * Note that I915_EXEC_RING_MASK limits execbuf to only using the 1621 * first 64 engines defined here. 
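 *
 * For reference, userspace describes the map roughly as follows (an
 * illustrative sketch only; "fd" and "ctx_id" are assumed to exist and
 * error handling is omitted):
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *		.engines = {
 *			{ .engine_class = I915_ENGINE_CLASS_RENDER },
 *			{ .engine_class = I915_ENGINE_CLASS_COPY },
 *		},
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (uintptr_t)&engines,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 *
 * Once such a map is installed, the execbuf ring selector
 * (flags & I915_EXEC_RING_MASK) indexes this array instead of naming a
 * legacy ring.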
1622 */ 1623 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); 1624 1625 set.engines = kmalloc(struct_size(set.engines, engines, num_engines), 1626 GFP_KERNEL); 1627 if (!set.engines) 1628 return -ENOMEM; 1629 1630 init_rcu_head(&set.engines->rcu); 1631 for (n = 0; n < num_engines; n++) { 1632 struct i915_engine_class_instance ci; 1633 struct intel_engine_cs *engine; 1634 struct intel_context *ce; 1635 1636 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { 1637 __free_engines(set.engines, n); 1638 return -EFAULT; 1639 } 1640 1641 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && 1642 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) { 1643 set.engines->engines[n] = NULL; 1644 continue; 1645 } 1646 1647 engine = intel_engine_lookup_user(ctx->i915, 1648 ci.engine_class, 1649 ci.engine_instance); 1650 if (!engine) { 1651 DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n", 1652 n, ci.engine_class, ci.engine_instance); 1653 __free_engines(set.engines, n); 1654 return -ENOENT; 1655 } 1656 1657 ce = intel_context_create(ctx, engine); 1658 if (IS_ERR(ce)) { 1659 __free_engines(set.engines, n); 1660 return PTR_ERR(ce); 1661 } 1662 1663 set.engines->engines[n] = ce; 1664 } 1665 set.engines->num_engines = num_engines; 1666 1667 err = -EFAULT; 1668 if (!get_user(extensions, &user->extensions)) 1669 err = i915_user_extensions(u64_to_user_ptr(extensions), 1670 set_engines__extensions, 1671 ARRAY_SIZE(set_engines__extensions), 1672 &set); 1673 if (err) { 1674 free_engines(set.engines); 1675 return err; 1676 } 1677 1678 replace: 1679 mutex_lock(&ctx->engines_mutex); 1680 if (args->size) 1681 i915_gem_context_set_user_engines(ctx); 1682 else 1683 i915_gem_context_clear_user_engines(ctx); 1684 rcu_swap_protected(ctx->engines, set.engines, 1); 1685 mutex_unlock(&ctx->engines_mutex); 1686 1687 call_rcu(&set.engines->rcu, free_engines_rcu); 1688 1689 return 0; 1690 } 1691 1692 static struct i915_gem_engines * 1693 __copy_engines(struct i915_gem_engines *e) 1694 { 1695 struct i915_gem_engines *copy; 1696 unsigned int n; 1697 1698 copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); 1699 if (!copy) 1700 return ERR_PTR(-ENOMEM); 1701 1702 init_rcu_head(©->rcu); 1703 for (n = 0; n < e->num_engines; n++) { 1704 if (e->engines[n]) 1705 copy->engines[n] = intel_context_get(e->engines[n]); 1706 else 1707 copy->engines[n] = NULL; 1708 } 1709 copy->num_engines = n; 1710 1711 return copy; 1712 } 1713 1714 static int 1715 get_engines(struct i915_gem_context *ctx, 1716 struct drm_i915_gem_context_param *args) 1717 { 1718 struct i915_context_param_engines __user *user; 1719 struct i915_gem_engines *e; 1720 size_t n, count, size; 1721 int err = 0; 1722 1723 err = mutex_lock_interruptible(&ctx->engines_mutex); 1724 if (err) 1725 return err; 1726 1727 e = NULL; 1728 if (i915_gem_context_user_engines(ctx)) 1729 e = __copy_engines(i915_gem_context_engines(ctx)); 1730 mutex_unlock(&ctx->engines_mutex); 1731 if (IS_ERR_OR_NULL(e)) { 1732 args->size = 0; 1733 return PTR_ERR_OR_ZERO(e); 1734 } 1735 1736 count = e->num_engines; 1737 1738 /* Be paranoid in case we have an impedance mismatch */ 1739 if (!check_struct_size(user, engines, count, &size)) { 1740 err = -EINVAL; 1741 goto err_free; 1742 } 1743 if (overflows_type(size, args->size)) { 1744 err = -EINVAL; 1745 goto err_free; 1746 } 1747 1748 if (!args->size) { 1749 args->size = size; 1750 goto err_free; 1751 } 1752 1753 if (args->size < size) { 1754 err = -EINVAL; 1755 goto err_free; 1756 } 1757 1758 user = 
u64_to_user_ptr(args->value); 1759 if (!access_ok(user, size)) { 1760 err = -EFAULT; 1761 goto err_free; 1762 } 1763 1764 if (put_user(0, &user->extensions)) { 1765 err = -EFAULT; 1766 goto err_free; 1767 } 1768 1769 for (n = 0; n < count; n++) { 1770 struct i915_engine_class_instance ci = { 1771 .engine_class = I915_ENGINE_CLASS_INVALID, 1772 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE, 1773 }; 1774 1775 if (e->engines[n]) { 1776 ci.engine_class = e->engines[n]->engine->uabi_class; 1777 ci.engine_instance = e->engines[n]->engine->uabi_instance; 1778 } 1779 1780 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { 1781 err = -EFAULT; 1782 goto err_free; 1783 } 1784 } 1785 1786 args->size = size; 1787 1788 err_free: 1789 free_engines(e); 1790 return err; 1791 } 1792 1793 static int 1794 set_persistence(struct i915_gem_context *ctx, 1795 const struct drm_i915_gem_context_param *args) 1796 { 1797 if (args->size) 1798 return -EINVAL; 1799 1800 return __context_set_persistence(ctx, args->value); 1801 } 1802 1803 static int ctx_setparam(struct drm_i915_file_private *fpriv, 1804 struct i915_gem_context *ctx, 1805 struct drm_i915_gem_context_param *args) 1806 { 1807 int ret = 0; 1808 1809 switch (args->param) { 1810 case I915_CONTEXT_PARAM_NO_ZEROMAP: 1811 if (args->size) 1812 ret = -EINVAL; 1813 else if (args->value) 1814 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 1815 else 1816 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 1817 break; 1818 1819 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 1820 if (args->size) 1821 ret = -EINVAL; 1822 else if (args->value) 1823 i915_gem_context_set_no_error_capture(ctx); 1824 else 1825 i915_gem_context_clear_no_error_capture(ctx); 1826 break; 1827 1828 case I915_CONTEXT_PARAM_BANNABLE: 1829 if (args->size) 1830 ret = -EINVAL; 1831 else if (!capable(CAP_SYS_ADMIN) && !args->value) 1832 ret = -EPERM; 1833 else if (args->value) 1834 i915_gem_context_set_bannable(ctx); 1835 else 1836 i915_gem_context_clear_bannable(ctx); 1837 break; 1838 1839 case I915_CONTEXT_PARAM_RECOVERABLE: 1840 if (args->size) 1841 ret = -EINVAL; 1842 else if (args->value) 1843 i915_gem_context_set_recoverable(ctx); 1844 else 1845 i915_gem_context_clear_recoverable(ctx); 1846 break; 1847 1848 case I915_CONTEXT_PARAM_PRIORITY: 1849 { 1850 s64 priority = args->value; 1851 1852 if (args->size) 1853 ret = -EINVAL; 1854 else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) 1855 ret = -ENODEV; 1856 else if (priority > I915_CONTEXT_MAX_USER_PRIORITY || 1857 priority < I915_CONTEXT_MIN_USER_PRIORITY) 1858 ret = -EINVAL; 1859 else if (priority > I915_CONTEXT_DEFAULT_PRIORITY && 1860 !capable(CAP_SYS_NICE)) 1861 ret = -EPERM; 1862 else 1863 ctx->sched.priority = 1864 I915_USER_PRIORITY(priority); 1865 } 1866 break; 1867 1868 case I915_CONTEXT_PARAM_SSEU: 1869 ret = set_sseu(ctx, args); 1870 break; 1871 1872 case I915_CONTEXT_PARAM_VM: 1873 ret = set_ppgtt(fpriv, ctx, args); 1874 break; 1875 1876 case I915_CONTEXT_PARAM_ENGINES: 1877 ret = set_engines(ctx, args); 1878 break; 1879 1880 case I915_CONTEXT_PARAM_PERSISTENCE: 1881 ret = set_persistence(ctx, args); 1882 break; 1883 1884 case I915_CONTEXT_PARAM_BAN_PERIOD: 1885 default: 1886 ret = -EINVAL; 1887 break; 1888 } 1889 1890 return ret; 1891 } 1892 1893 struct create_ext { 1894 struct i915_gem_context *ctx; 1895 struct drm_i915_file_private *fpriv; 1896 }; 1897 1898 static int create_setparam(struct i915_user_extension __user *ext, void *data) 1899 { 1900 struct drm_i915_gem_context_create_ext_setparam local; 1901 const 
struct create_ext *arg = data; 1902 1903 if (copy_from_user(&local, ext, sizeof(local))) 1904 return -EFAULT; 1905 1906 if (local.param.ctx_id) 1907 return -EINVAL; 1908 1909 return ctx_setparam(arg->fpriv, arg->ctx, &local.param); 1910 } 1911 1912 static int clone_engines(struct i915_gem_context *dst, 1913 struct i915_gem_context *src) 1914 { 1915 struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 1916 struct i915_gem_engines *clone; 1917 bool user_engines; 1918 unsigned long n; 1919 1920 clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); 1921 if (!clone) 1922 goto err_unlock; 1923 1924 init_rcu_head(&clone->rcu); 1925 for (n = 0; n < e->num_engines; n++) { 1926 struct intel_engine_cs *engine; 1927 1928 if (!e->engines[n]) { 1929 clone->engines[n] = NULL; 1930 continue; 1931 } 1932 engine = e->engines[n]->engine; 1933 1934 /* 1935 * Virtual engines are singletons; they can only exist 1936 * inside a single context, because they embed their 1937 * HW context... As each virtual context implies a single 1938 * timeline (each engine can only dequeue a single request 1939 * at any time), it would be surprising for two contexts 1940 * to use the same engine. So let's create a copy of 1941 * the virtual engine instead. 1942 */ 1943 if (intel_engine_is_virtual(engine)) 1944 clone->engines[n] = 1945 intel_execlists_clone_virtual(dst, engine); 1946 else 1947 clone->engines[n] = intel_context_create(dst, engine); 1948 if (IS_ERR_OR_NULL(clone->engines[n])) { 1949 __free_engines(clone, n); 1950 goto err_unlock; 1951 } 1952 } 1953 clone->num_engines = n; 1954 1955 user_engines = i915_gem_context_user_engines(src); 1956 i915_gem_context_unlock_engines(src); 1957 1958 free_engines(dst->engines); 1959 RCU_INIT_POINTER(dst->engines, clone); 1960 if (user_engines) 1961 i915_gem_context_set_user_engines(dst); 1962 else 1963 i915_gem_context_clear_user_engines(dst); 1964 return 0; 1965 1966 err_unlock: 1967 i915_gem_context_unlock_engines(src); 1968 return -ENOMEM; 1969 } 1970 1971 static int clone_flags(struct i915_gem_context *dst, 1972 struct i915_gem_context *src) 1973 { 1974 dst->user_flags = src->user_flags; 1975 return 0; 1976 } 1977 1978 static int clone_schedattr(struct i915_gem_context *dst, 1979 struct i915_gem_context *src) 1980 { 1981 dst->sched = src->sched; 1982 return 0; 1983 } 1984 1985 static int clone_sseu(struct i915_gem_context *dst, 1986 struct i915_gem_context *src) 1987 { 1988 struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 1989 struct i915_gem_engines *clone; 1990 unsigned long n; 1991 int err; 1992 1993 clone = dst->engines; /* no locking required; sole access */ 1994 if (e->num_engines != clone->num_engines) { 1995 err = -EINVAL; 1996 goto unlock; 1997 } 1998 1999 for (n = 0; n < e->num_engines; n++) { 2000 struct intel_context *ce = e->engines[n]; 2001 2002 if (clone->engines[n]->engine->class != ce->engine->class) { 2003 /* Must have compatible engine maps! 
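 * I915_CONTEXT_CLONE_ENGINES is a lower bit than I915_CONTEXT_CLONE_SSEU,
 * so when both are requested create_clone() has already copied the engine
 * map by the time we get here; otherwise the destination's default map
 * must still line up class-for-class at every index, or the clone is
 * rejected.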
*/ 2004 err = -EINVAL; 2005 goto unlock; 2006 } 2007 2008 /* serialises with set_sseu */ 2009 err = intel_context_lock_pinned(ce); 2010 if (err) 2011 goto unlock; 2012 2013 clone->engines[n]->sseu = ce->sseu; 2014 intel_context_unlock_pinned(ce); 2015 } 2016 2017 err = 0; 2018 unlock: 2019 i915_gem_context_unlock_engines(src); 2020 return err; 2021 } 2022 2023 static int clone_timeline(struct i915_gem_context *dst, 2024 struct i915_gem_context *src) 2025 { 2026 if (src->timeline) 2027 __assign_timeline(dst, src->timeline); 2028 2029 return 0; 2030 } 2031 2032 static int clone_vm(struct i915_gem_context *dst, 2033 struct i915_gem_context *src) 2034 { 2035 struct i915_address_space *vm; 2036 int err = 0; 2037 2038 rcu_read_lock(); 2039 do { 2040 vm = rcu_dereference(src->vm); 2041 if (!vm) 2042 break; 2043 2044 if (!kref_get_unless_zero(&vm->ref)) 2045 continue; 2046 2047 /* 2048 * This ppgtt may have be reallocated between 2049 * the read and the kref, and reassigned to a third 2050 * context. In order to avoid inadvertent sharing 2051 * of this ppgtt with that third context (and not 2052 * src), we have to confirm that we have the same 2053 * ppgtt after passing through the strong memory 2054 * barrier implied by a successful 2055 * kref_get_unless_zero(). 2056 * 2057 * Once we have acquired the current ppgtt of src, 2058 * we no longer care if it is released from src, as 2059 * it cannot be reallocated elsewhere. 2060 */ 2061 2062 if (vm == rcu_access_pointer(src->vm)) 2063 break; 2064 2065 i915_vm_put(vm); 2066 } while (1); 2067 rcu_read_unlock(); 2068 2069 if (vm) { 2070 if (!mutex_lock_interruptible(&dst->mutex)) { 2071 __assign_ppgtt(dst, vm); 2072 mutex_unlock(&dst->mutex); 2073 } else { 2074 err = -EINTR; 2075 } 2076 i915_vm_put(vm); 2077 } 2078 2079 return err; 2080 } 2081 2082 static int create_clone(struct i915_user_extension __user *ext, void *data) 2083 { 2084 static int (* const fn[])(struct i915_gem_context *dst, 2085 struct i915_gem_context *src) = { 2086 #define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y 2087 MAP(ENGINES, clone_engines), 2088 MAP(FLAGS, clone_flags), 2089 MAP(SCHEDATTR, clone_schedattr), 2090 MAP(SSEU, clone_sseu), 2091 MAP(TIMELINE, clone_timeline), 2092 MAP(VM, clone_vm), 2093 #undef MAP 2094 }; 2095 struct drm_i915_gem_context_create_ext_clone local; 2096 const struct create_ext *arg = data; 2097 struct i915_gem_context *dst = arg->ctx; 2098 struct i915_gem_context *src; 2099 int err, bit; 2100 2101 if (copy_from_user(&local, ext, sizeof(local))) 2102 return -EFAULT; 2103 2104 BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) != 2105 I915_CONTEXT_CLONE_UNKNOWN); 2106 2107 if (local.flags & I915_CONTEXT_CLONE_UNKNOWN) 2108 return -EINVAL; 2109 2110 if (local.rsvd) 2111 return -EINVAL; 2112 2113 rcu_read_lock(); 2114 src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id); 2115 rcu_read_unlock(); 2116 if (!src) 2117 return -ENOENT; 2118 2119 GEM_BUG_ON(src == dst); 2120 2121 for (bit = 0; bit < ARRAY_SIZE(fn); bit++) { 2122 if (!(local.flags & BIT(bit))) 2123 continue; 2124 2125 err = fn[bit](dst, src); 2126 if (err) 2127 return err; 2128 } 2129 2130 return 0; 2131 } 2132 2133 static const i915_user_extension_fn create_extensions[] = { 2134 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, 2135 [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone, 2136 }; 2137 2138 static bool client_is_banned(struct drm_i915_file_private *file_priv) 2139 { 2140 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; 2141 } 2142 
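/*
 * For reference, a minimal userspace sketch of the create ioctl handled
 * below, chaining a SETPARAM extension so that the new context gets its
 * own VM (illustrative only: "fd" is an open render node, error handling
 * is omitted, and the structs come from drm/i915_drm.h):
 *
 *	struct drm_i915_gem_vm_control vm = {};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *
 *	struct drm_i915_gem_context_create_ext_setparam p = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_VM,
 *			.value = vm.vm_id,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *
 * On success create.ctx_id names the new context; note that the SETPARAM
 * extension must leave param.ctx_id zero, as create_setparam() above
 * rejects anything else.
 */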
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm,
			  pid_nr(get_task_pid(current, PIDTYPE_PID)));
		return -EIO;
	}

	ext_data.ctx = i915_gem_create_context(i915, args->flags);
	if (IS_ERR(ext_data.ctx))
		return PTR_ERR(ext_data.ctx);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = ret;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;

err_ctx:
	context_close(ext_data.ctx);
	return ret;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	if (mutex_lock_interruptible(&file_priv->context_idr_lock))
		return -EINTR;

	ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
	mutex_unlock(&file_priv->context_idr_lock);
	if (!ctx)
		return -ENOENT;

	context_close(ctx);
	return 0;
}

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}

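/*
 * Illustrative userspace sketch (not part of this file): reading back a
 * context's SSEU configuration for the render engine through
 * DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, which lands in get_sseu() above.
 * Assumes the uapi definitions from include/uapi/drm/i915_drm.h; error
 * handling elided.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *
 * On return, sseu.slice_mask, sseu.subslice_mask and the min/max EU counts
 * hold the context's current configuration for that engine.
 */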
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->size = 0;
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		rcu_read_lock();
		if (rcu_access_pointer(ctx->vm))
			args->value = rcu_dereference(ctx->vm)->total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		rcu_read_unlock();
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = get_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = ctx_setparam(file_priv, ctx, args);

	i915_gem_context_put(ctx);
	return ret;
}

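/*
 * Illustrative userspace sketch (not part of this file): querying a context's
 * hang statistics with DRM_IOCTL_I915_GET_RESET_STATS, handled below.
 * Assumes the uapi definitions from include/uapi/drm/i915_drm.h; error
 * handling elided.
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
 *
 * On return, stats.batch_active and stats.batch_pending reflect this
 * context's guilty/active hang counts, while stats.reset_count reports the
 * global reset count only for CAP_SYS_ADMIN callers (otherwise 0).
 */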
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

static void i915_global_gem_context_shrink(void)
{
	kmem_cache_shrink(global.slab_luts);
}

static void i915_global_gem_context_exit(void)
{
	kmem_cache_destroy(global.slab_luts);
}

static struct i915_global_gem_context global = { {
	.shrink = i915_global_gem_context_shrink,
	.exit = i915_global_gem_context_exit,
} };

int __init i915_global_gem_context_init(void)
{
	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!global.slab_luts)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}