/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU enters
 * and exits RC6 (the GPU has its own internal power context, except on gen5).
 * Though something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                           0            0           0
 * S1: context created                         1            0           0
 * S2: context is currently running            2            1           X
 * S3: GPU referenced, but not current         2            0           1
 * S4: context is current, but destroyed       1            1           0
 * S5: like S3, but destroyed                  1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits an execbuf with the context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context while it is still active.
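 *
 * For illustration only (not part of the driver; the fd, batch setup and
 * error handling are elided or hypothetical, while the ioctl numbers and
 * structures come from the i915 uAPI and libdrm): the common transitions
 * above correspond roughly to the following userspace sequence.
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *	struct drm_i915_gem_execbuffer2 execbuf = {};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);      S0->S1
 *	i915_execbuffer2_set_context_id(execbuf, create.ctx_id);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);        S1->S2
 *
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);    destroy path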
64 * 65 */ 66 67 #include <linux/log2.h> 68 #include <linux/nospec.h> 69 70 #include <drm/i915_drm.h> 71 72 #include "gt/gen6_ppgtt.h" 73 #include "gt/intel_context.h" 74 #include "gt/intel_engine_heartbeat.h" 75 #include "gt/intel_engine_pm.h" 76 #include "gt/intel_engine_user.h" 77 #include "gt/intel_lrc_reg.h" 78 #include "gt/intel_ring.h" 79 80 #include "i915_gem_context.h" 81 #include "i915_globals.h" 82 #include "i915_trace.h" 83 #include "i915_user_extensions.h" 84 85 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 86 87 static struct i915_global_gem_context { 88 struct i915_global base; 89 struct kmem_cache *slab_luts; 90 } global; 91 92 struct i915_lut_handle *i915_lut_handle_alloc(void) 93 { 94 return kmem_cache_alloc(global.slab_luts, GFP_KERNEL); 95 } 96 97 void i915_lut_handle_free(struct i915_lut_handle *lut) 98 { 99 return kmem_cache_free(global.slab_luts, lut); 100 } 101 102 static void lut_close(struct i915_gem_context *ctx) 103 { 104 struct radix_tree_iter iter; 105 void __rcu **slot; 106 107 lockdep_assert_held(&ctx->mutex); 108 109 rcu_read_lock(); 110 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { 111 struct i915_vma *vma = rcu_dereference_raw(*slot); 112 struct drm_i915_gem_object *obj = vma->obj; 113 struct i915_lut_handle *lut; 114 115 if (!kref_get_unless_zero(&obj->base.refcount)) 116 continue; 117 118 rcu_read_unlock(); 119 i915_gem_object_lock(obj); 120 list_for_each_entry(lut, &obj->lut_list, obj_link) { 121 if (lut->ctx != ctx) 122 continue; 123 124 if (lut->handle != iter.index) 125 continue; 126 127 list_del(&lut->obj_link); 128 break; 129 } 130 i915_gem_object_unlock(obj); 131 rcu_read_lock(); 132 133 if (&lut->obj_link != &obj->lut_list) { 134 i915_lut_handle_free(lut); 135 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); 136 if (atomic_dec_and_test(&vma->open_count) && 137 !i915_vma_is_ggtt(vma)) 138 i915_vma_close(vma); 139 i915_gem_object_put(obj); 140 } 141 142 i915_gem_object_put(obj); 143 } 144 rcu_read_unlock(); 145 } 146 147 static struct intel_context * 148 lookup_user_engine(struct i915_gem_context *ctx, 149 unsigned long flags, 150 const struct i915_engine_class_instance *ci) 151 #define LOOKUP_USER_INDEX BIT(0) 152 { 153 int idx; 154 155 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx)) 156 return ERR_PTR(-EINVAL); 157 158 if (!i915_gem_context_user_engines(ctx)) { 159 struct intel_engine_cs *engine; 160 161 engine = intel_engine_lookup_user(ctx->i915, 162 ci->engine_class, 163 ci->engine_instance); 164 if (!engine) 165 return ERR_PTR(-EINVAL); 166 167 idx = engine->legacy_idx; 168 } else { 169 idx = ci->engine_instance; 170 } 171 172 return i915_gem_context_get_engine(ctx, idx); 173 } 174 175 static struct i915_address_space * 176 context_get_vm_rcu(struct i915_gem_context *ctx) 177 { 178 GEM_BUG_ON(!rcu_access_pointer(ctx->vm)); 179 180 do { 181 struct i915_address_space *vm; 182 183 /* 184 * We do not allow downgrading from full-ppgtt [to a shared 185 * global gtt], so ctx->vm cannot become NULL. 186 */ 187 vm = rcu_dereference(ctx->vm); 188 if (!kref_get_unless_zero(&vm->ref)) 189 continue; 190 191 /* 192 * This ppgtt may have be reallocated between 193 * the read and the kref, and reassigned to a third 194 * context. In order to avoid inadvertent sharing 195 * of this ppgtt with that third context (and not 196 * src), we have to confirm that we have the same 197 * ppgtt after passing through the strong memory 198 * barrier implied by a successful 199 * kref_get_unless_zero(). 
200 * 201 * Once we have acquired the current ppgtt of ctx, 202 * we no longer care if it is released from ctx, as 203 * it cannot be reallocated elsewhere. 204 */ 205 206 if (vm == rcu_access_pointer(ctx->vm)) 207 return rcu_pointer_handoff(vm); 208 209 i915_vm_put(vm); 210 } while (1); 211 } 212 213 static void intel_context_set_gem(struct intel_context *ce, 214 struct i915_gem_context *ctx) 215 { 216 GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); 217 RCU_INIT_POINTER(ce->gem_context, ctx); 218 219 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) 220 ce->ring = __intel_context_ring_size(SZ_16K); 221 222 if (rcu_access_pointer(ctx->vm)) { 223 struct i915_address_space *vm; 224 225 rcu_read_lock(); 226 vm = context_get_vm_rcu(ctx); /* hmm */ 227 rcu_read_unlock(); 228 229 i915_vm_put(ce->vm); 230 ce->vm = vm; 231 } 232 233 GEM_BUG_ON(ce->timeline); 234 if (ctx->timeline) 235 ce->timeline = intel_timeline_get(ctx->timeline); 236 237 if (ctx->sched.priority >= I915_PRIORITY_NORMAL && 238 intel_engine_has_semaphores(ce->engine)) 239 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); 240 } 241 242 static void __free_engines(struct i915_gem_engines *e, unsigned int count) 243 { 244 while (count--) { 245 if (!e->engines[count]) 246 continue; 247 248 RCU_INIT_POINTER(e->engines[count]->gem_context, NULL); 249 intel_context_put(e->engines[count]); 250 } 251 kfree(e); 252 } 253 254 static void free_engines(struct i915_gem_engines *e) 255 { 256 __free_engines(e, e->num_engines); 257 } 258 259 static void free_engines_rcu(struct rcu_head *rcu) 260 { 261 free_engines(container_of(rcu, struct i915_gem_engines, rcu)); 262 } 263 264 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) 265 { 266 const struct intel_gt *gt = &ctx->i915->gt; 267 struct intel_engine_cs *engine; 268 struct i915_gem_engines *e; 269 enum intel_engine_id id; 270 271 e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL); 272 if (!e) 273 return ERR_PTR(-ENOMEM); 274 275 init_rcu_head(&e->rcu); 276 for_each_engine(engine, gt, id) { 277 struct intel_context *ce; 278 279 if (engine->legacy_idx == INVALID_ENGINE) 280 continue; 281 282 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES); 283 GEM_BUG_ON(e->engines[engine->legacy_idx]); 284 285 ce = intel_context_create(engine); 286 if (IS_ERR(ce)) { 287 __free_engines(e, e->num_engines + 1); 288 return ERR_CAST(ce); 289 } 290 291 intel_context_set_gem(ce, ctx); 292 293 e->engines[engine->legacy_idx] = ce; 294 e->num_engines = max(e->num_engines, engine->legacy_idx); 295 } 296 e->num_engines++; 297 298 return e; 299 } 300 301 static void i915_gem_context_free(struct i915_gem_context *ctx) 302 { 303 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 304 305 spin_lock(&ctx->i915->gem.contexts.lock); 306 list_del(&ctx->link); 307 spin_unlock(&ctx->i915->gem.contexts.lock); 308 309 free_engines(rcu_access_pointer(ctx->engines)); 310 mutex_destroy(&ctx->engines_mutex); 311 312 if (ctx->timeline) 313 intel_timeline_put(ctx->timeline); 314 315 put_pid(ctx->pid); 316 mutex_destroy(&ctx->mutex); 317 318 kfree_rcu(ctx, rcu); 319 } 320 321 static void contexts_free_all(struct llist_node *list) 322 { 323 struct i915_gem_context *ctx, *cn; 324 325 llist_for_each_entry_safe(ctx, cn, list, free_link) 326 i915_gem_context_free(ctx); 327 } 328 329 static void contexts_flush_free(struct i915_gem_contexts *gc) 330 { 331 contexts_free_all(llist_del_all(&gc->free_list)); 332 } 333 334 static void contexts_free_worker(struct work_struct *work) 335 { 336 struct i915_gem_contexts *gc = 
337 container_of(work, typeof(*gc), free_work); 338 339 contexts_flush_free(gc); 340 } 341 342 void i915_gem_context_release(struct kref *ref) 343 { 344 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); 345 struct i915_gem_contexts *gc = &ctx->i915->gem.contexts; 346 347 trace_i915_context_free(ctx); 348 if (llist_add(&ctx->free_link, &gc->free_list)) 349 schedule_work(&gc->free_work); 350 } 351 352 static inline struct i915_gem_engines * 353 __context_engines_static(const struct i915_gem_context *ctx) 354 { 355 return rcu_dereference_protected(ctx->engines, true); 356 } 357 358 static bool __reset_engine(struct intel_engine_cs *engine) 359 { 360 struct intel_gt *gt = engine->gt; 361 bool success = false; 362 363 if (!intel_has_reset_engine(gt)) 364 return false; 365 366 if (!test_and_set_bit(I915_RESET_ENGINE + engine->id, 367 >->reset.flags)) { 368 success = intel_engine_reset(engine, NULL) == 0; 369 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, 370 >->reset.flags); 371 } 372 373 return success; 374 } 375 376 static void __reset_context(struct i915_gem_context *ctx, 377 struct intel_engine_cs *engine) 378 { 379 intel_gt_handle_error(engine->gt, engine->mask, 0, 380 "context closure in %s", ctx->name); 381 } 382 383 static bool __cancel_engine(struct intel_engine_cs *engine) 384 { 385 /* 386 * Send a "high priority pulse" down the engine to cause the 387 * current request to be momentarily preempted. (If it fails to 388 * be preempted, it will be reset). As we have marked our context 389 * as banned, any incomplete request, including any running, will 390 * be skipped following the preemption. 391 * 392 * If there is no hangchecking (one of the reasons why we try to 393 * cancel the context) and no forced preemption, there may be no 394 * means by which we reset the GPU and evict the persistent hog. 395 * Ergo if we are unable to inject a preemptive pulse that can 396 * kill the banned context, we fallback to doing a local reset 397 * instead. 398 */ 399 if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) && 400 !intel_engine_pulse(engine)) 401 return true; 402 403 /* If we are unable to send a pulse, try resetting this engine. */ 404 return __reset_engine(engine); 405 } 406 407 static struct intel_engine_cs *__active_engine(struct i915_request *rq) 408 { 409 struct intel_engine_cs *engine, *locked; 410 411 /* 412 * Serialise with __i915_request_submit() so that it sees 413 * is-banned?, or we know the request is already inflight. 
 */
	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->active.lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->active.lock);
		spin_lock(&engine->active.lock);
		locked = engine;
	}

	engine = NULL;
	if (i915_request_is_active(rq) && !rq->fence.error)
		engine = rq->engine;

	spin_unlock_irq(&locked->active.lock);

	return engine;
}

static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	if (!ce->timeline)
		return NULL;

	mutex_lock(&ce->timeline->mutex);
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		if (i915_request_completed(rq))
			break;

		/* Check with the backend if the request is inflight */
		engine = __active_engine(rq);
		if (engine)
			break;
	}
	mutex_unlock(&ce->timeline->mutex);

	return engine;
}

static void kill_context(struct i915_gem_context *ctx)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
	for_each_gem_engine(ce, __context_engines_static(ctx), it) {
		struct intel_engine_cs *engine;

		if (intel_context_set_banned(ce))
			continue;

		/*
		 * Check the current active state of this context; if we
		 * are currently executing on the GPU we need to evict
		 * ourselves. On the other hand, if we haven't yet been
		 * submitted to the GPU or if everything is complete,
		 * we have nothing to do.
		 */
		engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
		if (engine && !__cancel_engine(engine))
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a full
			 * reset. We hope the collateral damage is worth it.
			 */
			__reset_context(ctx, engine);
	}
}

static void set_closed_name(struct i915_gem_context *ctx)
{
	char *s;

	/* Replace '[]' with '<>' to indicate closed in debug prints */

	s = strrchr(ctx->name, '[');
	if (!s)
		return;

	*s = '<';

	s = strchr(s + 1, ']');
	if (s)
		*s = '>';
}

static void context_close(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	i915_gem_context_set_closed(ctx);
	set_closed_name(ctx);

	mutex_lock(&ctx->mutex);

	vm = i915_gem_context_vm(ctx);
	if (vm)
		i915_vm_close(vm);

	ctx->file_priv = ERR_PTR(-EBADF);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);

	mutex_unlock(&ctx->mutex);

	/*
	 * If the user has disabled hangchecking, we cannot be sure that
	 * the batches will ever complete after the context is closed,
	 * keeping the context and all resources pinned forever. So in this
	 * case we opt to forcibly kill off all remaining requests on
	 * context close.
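	 *
	 * (For illustration, a hypothetical userspace snippet with error
	 * handling elided, where ctx_id and fd come from context creation
	 * and the opened DRM device: a client opts out of persistence with
	 * the SETPARAM ioctl, after which closing the context cancels its
	 * outstanding requests.
	 *
	 *	struct drm_i915_gem_context_param p = {
	 *		.ctx_id = ctx_id,
	 *		.param = I915_CONTEXT_PARAM_PERSISTENCE,
	 *		.value = 0,
	 *	};
	 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
	 * )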
	 */
	if (!i915_gem_context_is_persistent(ctx) ||
	    !i915_modparams.enable_hangcheck)
		kill_context(ctx);

	i915_gem_context_put(ctx);
}

static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
{
	if (i915_gem_context_is_persistent(ctx) == state)
		return 0;

	if (state) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!i915_modparams.enable_hangcheck)
			return -EINVAL;

		i915_gem_context_set_persistence(ctx);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * clean up a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(&ctx->i915->gt))
			return -ENODEV;

		i915_gem_context_clear_persistence(ctx);
	}

	return 0;
}

static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	int err;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->i915 = i915;
	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
	mutex_init(&ctx->mutex);

	mutex_init(&ctx->engines_mutex);
	e = default_engines(ctx);
	if (IS_ERR(e)) {
		err = PTR_ERR(e);
		goto err_free;
	}
	RCU_INIT_POINTER(ctx->engines, e);

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(i915);

	i915_gem_context_set_bannable(ctx);
	i915_gem_context_set_recoverable(ctx);
	__context_set_persistence(ctx, true /* cgroup hook?
*/); 625 626 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) 627 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; 628 629 spin_lock(&i915->gem.contexts.lock); 630 list_add_tail(&ctx->link, &i915->gem.contexts.list); 631 spin_unlock(&i915->gem.contexts.lock); 632 633 return ctx; 634 635 err_free: 636 kfree(ctx); 637 return ERR_PTR(err); 638 } 639 640 static void 641 context_apply_all(struct i915_gem_context *ctx, 642 void (*fn)(struct intel_context *ce, void *data), 643 void *data) 644 { 645 struct i915_gem_engines_iter it; 646 struct intel_context *ce; 647 648 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) 649 fn(ce, data); 650 i915_gem_context_unlock_engines(ctx); 651 } 652 653 static void __apply_ppgtt(struct intel_context *ce, void *vm) 654 { 655 i915_vm_put(ce->vm); 656 ce->vm = i915_vm_get(vm); 657 } 658 659 static struct i915_address_space * 660 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) 661 { 662 struct i915_address_space *old = i915_gem_context_vm(ctx); 663 664 GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old)); 665 666 rcu_assign_pointer(ctx->vm, i915_vm_open(vm)); 667 context_apply_all(ctx, __apply_ppgtt, vm); 668 669 return old; 670 } 671 672 static void __assign_ppgtt(struct i915_gem_context *ctx, 673 struct i915_address_space *vm) 674 { 675 if (vm == rcu_access_pointer(ctx->vm)) 676 return; 677 678 vm = __set_ppgtt(ctx, vm); 679 if (vm) 680 i915_vm_close(vm); 681 } 682 683 static void __set_timeline(struct intel_timeline **dst, 684 struct intel_timeline *src) 685 { 686 struct intel_timeline *old = *dst; 687 688 *dst = src ? intel_timeline_get(src) : NULL; 689 690 if (old) 691 intel_timeline_put(old); 692 } 693 694 static void __apply_timeline(struct intel_context *ce, void *timeline) 695 { 696 __set_timeline(&ce->timeline, timeline); 697 } 698 699 static void __assign_timeline(struct i915_gem_context *ctx, 700 struct intel_timeline *timeline) 701 { 702 __set_timeline(&ctx->timeline, timeline); 703 context_apply_all(ctx, __apply_timeline, timeline); 704 } 705 706 static struct i915_gem_context * 707 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) 708 { 709 struct i915_gem_context *ctx; 710 711 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && 712 !HAS_EXECLISTS(i915)) 713 return ERR_PTR(-EINVAL); 714 715 /* Reap the stale contexts */ 716 contexts_flush_free(&i915->gem.contexts); 717 718 ctx = __create_context(i915); 719 if (IS_ERR(ctx)) 720 return ctx; 721 722 if (HAS_FULL_PPGTT(i915)) { 723 struct i915_ppgtt *ppgtt; 724 725 ppgtt = i915_ppgtt_create(&i915->gt); 726 if (IS_ERR(ppgtt)) { 727 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", 728 PTR_ERR(ppgtt)); 729 context_close(ctx); 730 return ERR_CAST(ppgtt); 731 } 732 733 mutex_lock(&ctx->mutex); 734 __assign_ppgtt(ctx, &ppgtt->vm); 735 mutex_unlock(&ctx->mutex); 736 737 i915_vm_put(&ppgtt->vm); 738 } 739 740 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { 741 struct intel_timeline *timeline; 742 743 timeline = intel_timeline_create(&i915->gt, NULL); 744 if (IS_ERR(timeline)) { 745 context_close(ctx); 746 return ERR_CAST(timeline); 747 } 748 749 __assign_timeline(ctx, timeline); 750 intel_timeline_put(timeline); 751 } 752 753 trace_i915_context_create(ctx); 754 755 return ctx; 756 } 757 758 static void init_contexts(struct i915_gem_contexts *gc) 759 { 760 spin_lock_init(&gc->lock); 761 INIT_LIST_HEAD(&gc->list); 762 763 INIT_WORK(&gc->free_work, contexts_free_worker); 764 init_llist_head(&gc->free_list); 765 
} 766 767 void i915_gem_init__contexts(struct drm_i915_private *i915) 768 { 769 init_contexts(&i915->gem.contexts); 770 DRM_DEBUG_DRIVER("%s context support initialized\n", 771 DRIVER_CAPS(i915)->has_logical_contexts ? 772 "logical" : "fake"); 773 } 774 775 void i915_gem_driver_release__contexts(struct drm_i915_private *i915) 776 { 777 flush_work(&i915->gem.contexts.free_work); 778 } 779 780 static int vm_idr_cleanup(int id, void *p, void *data) 781 { 782 i915_vm_put(p); 783 return 0; 784 } 785 786 static int gem_context_register(struct i915_gem_context *ctx, 787 struct drm_i915_file_private *fpriv, 788 u32 *id) 789 { 790 struct i915_address_space *vm; 791 int ret; 792 793 ctx->file_priv = fpriv; 794 795 mutex_lock(&ctx->mutex); 796 vm = i915_gem_context_vm(ctx); 797 if (vm) 798 WRITE_ONCE(vm->file, fpriv); /* XXX */ 799 mutex_unlock(&ctx->mutex); 800 801 ctx->pid = get_task_pid(current, PIDTYPE_PID); 802 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 803 current->comm, pid_nr(ctx->pid)); 804 805 /* And finally expose ourselves to userspace via the idr */ 806 ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL); 807 if (ret) 808 put_pid(fetch_and_zero(&ctx->pid)); 809 810 return ret; 811 } 812 813 int i915_gem_context_open(struct drm_i915_private *i915, 814 struct drm_file *file) 815 { 816 struct drm_i915_file_private *file_priv = file->driver_priv; 817 struct i915_gem_context *ctx; 818 int err; 819 u32 id; 820 821 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); 822 823 mutex_init(&file_priv->vm_idr_lock); 824 idr_init_base(&file_priv->vm_idr, 1); 825 826 ctx = i915_gem_create_context(i915, 0); 827 if (IS_ERR(ctx)) { 828 err = PTR_ERR(ctx); 829 goto err; 830 } 831 832 err = gem_context_register(ctx, file_priv, &id); 833 if (err < 0) 834 goto err_ctx; 835 836 GEM_BUG_ON(id); 837 return 0; 838 839 err_ctx: 840 context_close(ctx); 841 err: 842 idr_destroy(&file_priv->vm_idr); 843 xa_destroy(&file_priv->context_xa); 844 mutex_destroy(&file_priv->vm_idr_lock); 845 return err; 846 } 847 848 void i915_gem_context_close(struct drm_file *file) 849 { 850 struct drm_i915_file_private *file_priv = file->driver_priv; 851 struct drm_i915_private *i915 = file_priv->dev_priv; 852 struct i915_gem_context *ctx; 853 unsigned long idx; 854 855 xa_for_each(&file_priv->context_xa, idx, ctx) 856 context_close(ctx); 857 xa_destroy(&file_priv->context_xa); 858 859 idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL); 860 idr_destroy(&file_priv->vm_idr); 861 mutex_destroy(&file_priv->vm_idr_lock); 862 863 contexts_flush_free(&i915->gem.contexts); 864 } 865 866 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, 867 struct drm_file *file) 868 { 869 struct drm_i915_private *i915 = to_i915(dev); 870 struct drm_i915_gem_vm_control *args = data; 871 struct drm_i915_file_private *file_priv = file->driver_priv; 872 struct i915_ppgtt *ppgtt; 873 int err; 874 875 if (!HAS_FULL_PPGTT(i915)) 876 return -ENODEV; 877 878 if (args->flags) 879 return -EINVAL; 880 881 ppgtt = i915_ppgtt_create(&i915->gt); 882 if (IS_ERR(ppgtt)) 883 return PTR_ERR(ppgtt); 884 885 ppgtt->vm.file = file_priv; 886 887 if (args->extensions) { 888 err = i915_user_extensions(u64_to_user_ptr(args->extensions), 889 NULL, 0, 890 ppgtt); 891 if (err) 892 goto err_put; 893 } 894 895 err = mutex_lock_interruptible(&file_priv->vm_idr_lock); 896 if (err) 897 goto err_put; 898 899 err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL); 900 if (err < 0) 901 goto err_unlock; 902 903 GEM_BUG_ON(err == 0); /* 
reserved for invalid/unassigned ppgtt */ 904 905 mutex_unlock(&file_priv->vm_idr_lock); 906 907 args->vm_id = err; 908 return 0; 909 910 err_unlock: 911 mutex_unlock(&file_priv->vm_idr_lock); 912 err_put: 913 i915_vm_put(&ppgtt->vm); 914 return err; 915 } 916 917 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, 918 struct drm_file *file) 919 { 920 struct drm_i915_file_private *file_priv = file->driver_priv; 921 struct drm_i915_gem_vm_control *args = data; 922 struct i915_address_space *vm; 923 int err; 924 u32 id; 925 926 if (args->flags) 927 return -EINVAL; 928 929 if (args->extensions) 930 return -EINVAL; 931 932 id = args->vm_id; 933 if (!id) 934 return -ENOENT; 935 936 err = mutex_lock_interruptible(&file_priv->vm_idr_lock); 937 if (err) 938 return err; 939 940 vm = idr_remove(&file_priv->vm_idr, id); 941 942 mutex_unlock(&file_priv->vm_idr_lock); 943 if (!vm) 944 return -ENOENT; 945 946 i915_vm_put(vm); 947 return 0; 948 } 949 950 struct context_barrier_task { 951 struct i915_active base; 952 void (*task)(void *data); 953 void *data; 954 }; 955 956 __i915_active_call 957 static void cb_retire(struct i915_active *base) 958 { 959 struct context_barrier_task *cb = container_of(base, typeof(*cb), base); 960 961 if (cb->task) 962 cb->task(cb->data); 963 964 i915_active_fini(&cb->base); 965 kfree(cb); 966 } 967 968 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); 969 static int context_barrier_task(struct i915_gem_context *ctx, 970 intel_engine_mask_t engines, 971 bool (*skip)(struct intel_context *ce, void *data), 972 int (*emit)(struct i915_request *rq, void *data), 973 void (*task)(void *data), 974 void *data) 975 { 976 struct context_barrier_task *cb; 977 struct i915_gem_engines_iter it; 978 struct intel_context *ce; 979 int err = 0; 980 981 GEM_BUG_ON(!task); 982 983 cb = kmalloc(sizeof(*cb), GFP_KERNEL); 984 if (!cb) 985 return -ENOMEM; 986 987 i915_active_init(&cb->base, NULL, cb_retire); 988 err = i915_active_acquire(&cb->base); 989 if (err) { 990 kfree(cb); 991 return err; 992 } 993 994 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 995 struct i915_request *rq; 996 997 if (I915_SELFTEST_ONLY(context_barrier_inject_fault & 998 ce->engine->mask)) { 999 err = -ENXIO; 1000 break; 1001 } 1002 1003 if (!(ce->engine->mask & engines)) 1004 continue; 1005 1006 if (skip && skip(ce, data)) 1007 continue; 1008 1009 rq = intel_context_create_request(ce); 1010 if (IS_ERR(rq)) { 1011 err = PTR_ERR(rq); 1012 break; 1013 } 1014 1015 err = 0; 1016 if (emit) 1017 err = emit(rq, data); 1018 if (err == 0) 1019 err = i915_active_add_request(&cb->base, rq); 1020 1021 i915_request_add(rq); 1022 if (err) 1023 break; 1024 } 1025 i915_gem_context_unlock_engines(ctx); 1026 1027 cb->task = err ? 
NULL : task; /* caller needs to unwind instead */ 1028 cb->data = data; 1029 1030 i915_active_release(&cb->base); 1031 1032 return err; 1033 } 1034 1035 static int get_ppgtt(struct drm_i915_file_private *file_priv, 1036 struct i915_gem_context *ctx, 1037 struct drm_i915_gem_context_param *args) 1038 { 1039 struct i915_address_space *vm; 1040 int ret; 1041 1042 if (!rcu_access_pointer(ctx->vm)) 1043 return -ENODEV; 1044 1045 rcu_read_lock(); 1046 vm = context_get_vm_rcu(ctx); 1047 rcu_read_unlock(); 1048 1049 ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); 1050 if (ret) 1051 goto err_put; 1052 1053 ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL); 1054 GEM_BUG_ON(!ret); 1055 if (ret < 0) 1056 goto err_unlock; 1057 1058 i915_vm_open(vm); 1059 1060 args->size = 0; 1061 args->value = ret; 1062 1063 ret = 0; 1064 err_unlock: 1065 mutex_unlock(&file_priv->vm_idr_lock); 1066 err_put: 1067 i915_vm_put(vm); 1068 return ret; 1069 } 1070 1071 static void set_ppgtt_barrier(void *data) 1072 { 1073 struct i915_address_space *old = data; 1074 1075 if (INTEL_GEN(old->i915) < 8) 1076 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); 1077 1078 i915_vm_close(old); 1079 } 1080 1081 static int emit_ppgtt_update(struct i915_request *rq, void *data) 1082 { 1083 struct i915_address_space *vm = rq->context->vm; 1084 struct intel_engine_cs *engine = rq->engine; 1085 u32 base = engine->mmio_base; 1086 u32 *cs; 1087 int i; 1088 1089 if (i915_vm_is_4lvl(vm)) { 1090 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1091 const dma_addr_t pd_daddr = px_dma(ppgtt->pd); 1092 1093 cs = intel_ring_begin(rq, 6); 1094 if (IS_ERR(cs)) 1095 return PTR_ERR(cs); 1096 1097 *cs++ = MI_LOAD_REGISTER_IMM(2); 1098 1099 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); 1100 *cs++ = upper_32_bits(pd_daddr); 1101 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); 1102 *cs++ = lower_32_bits(pd_daddr); 1103 1104 *cs++ = MI_NOOP; 1105 intel_ring_advance(rq, cs); 1106 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { 1107 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1108 int err; 1109 1110 /* Magic required to prevent forcewake errors! 
*/ 1111 err = engine->emit_flush(rq, EMIT_INVALIDATE); 1112 if (err) 1113 return err; 1114 1115 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); 1116 if (IS_ERR(cs)) 1117 return PTR_ERR(cs); 1118 1119 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; 1120 for (i = GEN8_3LVL_PDPES; i--; ) { 1121 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 1122 1123 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); 1124 *cs++ = upper_32_bits(pd_daddr); 1125 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); 1126 *cs++ = lower_32_bits(pd_daddr); 1127 } 1128 *cs++ = MI_NOOP; 1129 intel_ring_advance(rq, cs); 1130 } 1131 1132 return 0; 1133 } 1134 1135 static bool skip_ppgtt_update(struct intel_context *ce, void *data) 1136 { 1137 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) 1138 return true; 1139 1140 if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) 1141 return false; 1142 1143 if (!atomic_read(&ce->pin_count)) 1144 return true; 1145 1146 /* ppGTT is not part of the legacy context image */ 1147 if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm))) 1148 return true; 1149 1150 return false; 1151 } 1152 1153 static int set_ppgtt(struct drm_i915_file_private *file_priv, 1154 struct i915_gem_context *ctx, 1155 struct drm_i915_gem_context_param *args) 1156 { 1157 struct i915_address_space *vm, *old; 1158 int err; 1159 1160 if (args->size) 1161 return -EINVAL; 1162 1163 if (!rcu_access_pointer(ctx->vm)) 1164 return -ENODEV; 1165 1166 if (upper_32_bits(args->value)) 1167 return -ENOENT; 1168 1169 rcu_read_lock(); 1170 vm = idr_find(&file_priv->vm_idr, args->value); 1171 if (vm && !kref_get_unless_zero(&vm->ref)) 1172 vm = NULL; 1173 rcu_read_unlock(); 1174 if (!vm) 1175 return -ENOENT; 1176 1177 err = mutex_lock_interruptible(&ctx->mutex); 1178 if (err) 1179 goto out; 1180 1181 if (i915_gem_context_is_closed(ctx)) { 1182 err = -ENOENT; 1183 goto unlock; 1184 } 1185 1186 if (vm == rcu_access_pointer(ctx->vm)) 1187 goto unlock; 1188 1189 /* Teardown the existing obj:vma cache, it will have to be rebuilt. */ 1190 lut_close(ctx); 1191 1192 old = __set_ppgtt(ctx, vm); 1193 1194 /* 1195 * We need to flush any requests using the current ppgtt before 1196 * we release it as the requests do not hold a reference themselves, 1197 * only indirectly through the context. 
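	 *
	 * context_barrier_task() below queues a request on each engine of the
	 * context; each request emits the new page-directory pointers via
	 * emit_ppgtt_update(), and once all of them have retired the old vm
	 * is finally closed (and, on gen6/7, unpinned) by set_ppgtt_barrier().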
1198 */ 1199 err = context_barrier_task(ctx, ALL_ENGINES, 1200 skip_ppgtt_update, 1201 emit_ppgtt_update, 1202 set_ppgtt_barrier, 1203 old); 1204 if (err) { 1205 i915_vm_close(__set_ppgtt(ctx, old)); 1206 i915_vm_close(old); 1207 } 1208 1209 unlock: 1210 mutex_unlock(&ctx->mutex); 1211 out: 1212 i915_vm_put(vm); 1213 return err; 1214 } 1215 1216 static int gen8_emit_rpcs_config(struct i915_request *rq, 1217 struct intel_context *ce, 1218 struct intel_sseu sseu) 1219 { 1220 u64 offset; 1221 u32 *cs; 1222 1223 cs = intel_ring_begin(rq, 4); 1224 if (IS_ERR(cs)) 1225 return PTR_ERR(cs); 1226 1227 offset = i915_ggtt_offset(ce->state) + 1228 LRC_STATE_PN * PAGE_SIZE + 1229 CTX_R_PWR_CLK_STATE * 4; 1230 1231 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 1232 *cs++ = lower_32_bits(offset); 1233 *cs++ = upper_32_bits(offset); 1234 *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu); 1235 1236 intel_ring_advance(rq, cs); 1237 1238 return 0; 1239 } 1240 1241 static int 1242 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) 1243 { 1244 struct i915_request *rq; 1245 int ret; 1246 1247 lockdep_assert_held(&ce->pin_mutex); 1248 1249 /* 1250 * If the context is not idle, we have to submit an ordered request to 1251 * modify its context image via the kernel context (writing to our own 1252 * image, or into the registers directory, does not stick). Pristine 1253 * and idle contexts will be configured on pinning. 1254 */ 1255 if (!intel_context_pin_if_active(ce)) 1256 return 0; 1257 1258 rq = intel_engine_create_kernel_request(ce->engine); 1259 if (IS_ERR(rq)) { 1260 ret = PTR_ERR(rq); 1261 goto out_unpin; 1262 } 1263 1264 /* Serialise with the remote context */ 1265 ret = intel_context_prepare_remote_request(ce, rq); 1266 if (ret == 0) 1267 ret = gen8_emit_rpcs_config(rq, ce, sseu); 1268 1269 i915_request_add(rq); 1270 out_unpin: 1271 intel_context_unpin(ce); 1272 return ret; 1273 } 1274 1275 static int 1276 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu) 1277 { 1278 int ret; 1279 1280 GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8); 1281 1282 ret = intel_context_lock_pinned(ce); 1283 if (ret) 1284 return ret; 1285 1286 /* Nothing to do if unmodified. */ 1287 if (!memcmp(&ce->sseu, &sseu, sizeof(sseu))) 1288 goto unlock; 1289 1290 ret = gen8_modify_rpcs(ce, sseu); 1291 if (!ret) 1292 ce->sseu = sseu; 1293 1294 unlock: 1295 intel_context_unlock_pinned(ce); 1296 return ret; 1297 } 1298 1299 static int 1300 user_to_context_sseu(struct drm_i915_private *i915, 1301 const struct drm_i915_gem_context_param_sseu *user, 1302 struct intel_sseu *context) 1303 { 1304 const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu; 1305 1306 /* No zeros in any field. */ 1307 if (!user->slice_mask || !user->subslice_mask || 1308 !user->min_eus_per_subslice || !user->max_eus_per_subslice) 1309 return -EINVAL; 1310 1311 /* Max > min. */ 1312 if (user->max_eus_per_subslice < user->min_eus_per_subslice) 1313 return -EINVAL; 1314 1315 /* 1316 * Some future proofing on the types since the uAPI is wider than the 1317 * current internal implementation. 1318 */ 1319 if (overflows_type(user->slice_mask, context->slice_mask) || 1320 overflows_type(user->subslice_mask, context->subslice_mask) || 1321 overflows_type(user->min_eus_per_subslice, 1322 context->min_eus_per_subslice) || 1323 overflows_type(user->max_eus_per_subslice, 1324 context->max_eus_per_subslice)) 1325 return -EINVAL; 1326 1327 /* Check validity against hardware. 
*/ 1328 if (user->slice_mask & ~device->slice_mask) 1329 return -EINVAL; 1330 1331 if (user->subslice_mask & ~device->subslice_mask[0]) 1332 return -EINVAL; 1333 1334 if (user->max_eus_per_subslice > device->max_eus_per_subslice) 1335 return -EINVAL; 1336 1337 context->slice_mask = user->slice_mask; 1338 context->subslice_mask = user->subslice_mask; 1339 context->min_eus_per_subslice = user->min_eus_per_subslice; 1340 context->max_eus_per_subslice = user->max_eus_per_subslice; 1341 1342 /* Part specific restrictions. */ 1343 if (IS_GEN(i915, 11)) { 1344 unsigned int hw_s = hweight8(device->slice_mask); 1345 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); 1346 unsigned int req_s = hweight8(context->slice_mask); 1347 unsigned int req_ss = hweight8(context->subslice_mask); 1348 1349 /* 1350 * Only full subslice enablement is possible if more than one 1351 * slice is turned on. 1352 */ 1353 if (req_s > 1 && req_ss != hw_ss_per_s) 1354 return -EINVAL; 1355 1356 /* 1357 * If more than four (SScount bitfield limit) subslices are 1358 * requested then the number has to be even. 1359 */ 1360 if (req_ss > 4 && (req_ss & 1)) 1361 return -EINVAL; 1362 1363 /* 1364 * If only one slice is enabled and subslice count is below the 1365 * device full enablement, it must be at most half of the all 1366 * available subslices. 1367 */ 1368 if (req_s == 1 && req_ss < hw_ss_per_s && 1369 req_ss > (hw_ss_per_s / 2)) 1370 return -EINVAL; 1371 1372 /* ABI restriction - VME use case only. */ 1373 1374 /* All slices or one slice only. */ 1375 if (req_s != 1 && req_s != hw_s) 1376 return -EINVAL; 1377 1378 /* 1379 * Half subslices or full enablement only when one slice is 1380 * enabled. 1381 */ 1382 if (req_s == 1 && 1383 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) 1384 return -EINVAL; 1385 1386 /* No EU configuration changes. */ 1387 if ((user->min_eus_per_subslice != 1388 device->max_eus_per_subslice) || 1389 (user->max_eus_per_subslice != 1390 device->max_eus_per_subslice)) 1391 return -EINVAL; 1392 } 1393 1394 return 0; 1395 } 1396 1397 static int set_sseu(struct i915_gem_context *ctx, 1398 struct drm_i915_gem_context_param *args) 1399 { 1400 struct drm_i915_private *i915 = ctx->i915; 1401 struct drm_i915_gem_context_param_sseu user_sseu; 1402 struct intel_context *ce; 1403 struct intel_sseu sseu; 1404 unsigned long lookup; 1405 int ret; 1406 1407 if (args->size < sizeof(user_sseu)) 1408 return -EINVAL; 1409 1410 if (!IS_GEN(i915, 11)) 1411 return -ENODEV; 1412 1413 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 1414 sizeof(user_sseu))) 1415 return -EFAULT; 1416 1417 if (user_sseu.rsvd) 1418 return -EINVAL; 1419 1420 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 1421 return -EINVAL; 1422 1423 lookup = 0; 1424 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 1425 lookup |= LOOKUP_USER_INDEX; 1426 1427 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 1428 if (IS_ERR(ce)) 1429 return PTR_ERR(ce); 1430 1431 /* Only render engine supports RPCS configuration. 
*/ 1432 if (ce->engine->class != RENDER_CLASS) { 1433 ret = -ENODEV; 1434 goto out_ce; 1435 } 1436 1437 ret = user_to_context_sseu(i915, &user_sseu, &sseu); 1438 if (ret) 1439 goto out_ce; 1440 1441 ret = intel_context_reconfigure_sseu(ce, sseu); 1442 if (ret) 1443 goto out_ce; 1444 1445 args->size = sizeof(user_sseu); 1446 1447 out_ce: 1448 intel_context_put(ce); 1449 return ret; 1450 } 1451 1452 struct set_engines { 1453 struct i915_gem_context *ctx; 1454 struct i915_gem_engines *engines; 1455 }; 1456 1457 static int 1458 set_engines__load_balance(struct i915_user_extension __user *base, void *data) 1459 { 1460 struct i915_context_engines_load_balance __user *ext = 1461 container_of_user(base, typeof(*ext), base); 1462 const struct set_engines *set = data; 1463 struct intel_engine_cs *stack[16]; 1464 struct intel_engine_cs **siblings; 1465 struct intel_context *ce; 1466 u16 num_siblings, idx; 1467 unsigned int n; 1468 int err; 1469 1470 if (!HAS_EXECLISTS(set->ctx->i915)) 1471 return -ENODEV; 1472 1473 if (USES_GUC_SUBMISSION(set->ctx->i915)) 1474 return -ENODEV; /* not implement yet */ 1475 1476 if (get_user(idx, &ext->engine_index)) 1477 return -EFAULT; 1478 1479 if (idx >= set->engines->num_engines) { 1480 DRM_DEBUG("Invalid placement value, %d >= %d\n", 1481 idx, set->engines->num_engines); 1482 return -EINVAL; 1483 } 1484 1485 idx = array_index_nospec(idx, set->engines->num_engines); 1486 if (set->engines->engines[idx]) { 1487 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx); 1488 return -EEXIST; 1489 } 1490 1491 if (get_user(num_siblings, &ext->num_siblings)) 1492 return -EFAULT; 1493 1494 err = check_user_mbz(&ext->flags); 1495 if (err) 1496 return err; 1497 1498 err = check_user_mbz(&ext->mbz64); 1499 if (err) 1500 return err; 1501 1502 siblings = stack; 1503 if (num_siblings > ARRAY_SIZE(stack)) { 1504 siblings = kmalloc_array(num_siblings, 1505 sizeof(*siblings), 1506 GFP_KERNEL); 1507 if (!siblings) 1508 return -ENOMEM; 1509 } 1510 1511 for (n = 0; n < num_siblings; n++) { 1512 struct i915_engine_class_instance ci; 1513 1514 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { 1515 err = -EFAULT; 1516 goto out_siblings; 1517 } 1518 1519 siblings[n] = intel_engine_lookup_user(set->ctx->i915, 1520 ci.engine_class, 1521 ci.engine_instance); 1522 if (!siblings[n]) { 1523 DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n", 1524 n, ci.engine_class, ci.engine_instance); 1525 err = -EINVAL; 1526 goto out_siblings; 1527 } 1528 } 1529 1530 ce = intel_execlists_create_virtual(siblings, n); 1531 if (IS_ERR(ce)) { 1532 err = PTR_ERR(ce); 1533 goto out_siblings; 1534 } 1535 1536 intel_context_set_gem(ce, set->ctx); 1537 1538 if (cmpxchg(&set->engines->engines[idx], NULL, ce)) { 1539 intel_context_put(ce); 1540 err = -EEXIST; 1541 goto out_siblings; 1542 } 1543 1544 out_siblings: 1545 if (siblings != stack) 1546 kfree(siblings); 1547 1548 return err; 1549 } 1550 1551 static int 1552 set_engines__bond(struct i915_user_extension __user *base, void *data) 1553 { 1554 struct i915_context_engines_bond __user *ext = 1555 container_of_user(base, typeof(*ext), base); 1556 const struct set_engines *set = data; 1557 struct i915_engine_class_instance ci; 1558 struct intel_engine_cs *virtual; 1559 struct intel_engine_cs *master; 1560 u16 idx, num_bonds; 1561 int err, n; 1562 1563 if (get_user(idx, &ext->virtual_index)) 1564 return -EFAULT; 1565 1566 if (idx >= set->engines->num_engines) { 1567 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n", 1568 idx, 
set->engines->num_engines); 1569 return -EINVAL; 1570 } 1571 1572 idx = array_index_nospec(idx, set->engines->num_engines); 1573 if (!set->engines->engines[idx]) { 1574 DRM_DEBUG("Invalid engine at %d\n", idx); 1575 return -EINVAL; 1576 } 1577 virtual = set->engines->engines[idx]->engine; 1578 1579 err = check_user_mbz(&ext->flags); 1580 if (err) 1581 return err; 1582 1583 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { 1584 err = check_user_mbz(&ext->mbz64[n]); 1585 if (err) 1586 return err; 1587 } 1588 1589 if (copy_from_user(&ci, &ext->master, sizeof(ci))) 1590 return -EFAULT; 1591 1592 master = intel_engine_lookup_user(set->ctx->i915, 1593 ci.engine_class, ci.engine_instance); 1594 if (!master) { 1595 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n", 1596 ci.engine_class, ci.engine_instance); 1597 return -EINVAL; 1598 } 1599 1600 if (get_user(num_bonds, &ext->num_bonds)) 1601 return -EFAULT; 1602 1603 for (n = 0; n < num_bonds; n++) { 1604 struct intel_engine_cs *bond; 1605 1606 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) 1607 return -EFAULT; 1608 1609 bond = intel_engine_lookup_user(set->ctx->i915, 1610 ci.engine_class, 1611 ci.engine_instance); 1612 if (!bond) { 1613 DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", 1614 n, ci.engine_class, ci.engine_instance); 1615 return -EINVAL; 1616 } 1617 1618 /* 1619 * A non-virtual engine has no siblings to choose between; and 1620 * a submit fence will always be directed to the one engine. 1621 */ 1622 if (intel_engine_is_virtual(virtual)) { 1623 err = intel_virtual_engine_attach_bond(virtual, 1624 master, 1625 bond); 1626 if (err) 1627 return err; 1628 } 1629 } 1630 1631 return 0; 1632 } 1633 1634 static const i915_user_extension_fn set_engines__extensions[] = { 1635 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance, 1636 [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond, 1637 }; 1638 1639 static int 1640 set_engines(struct i915_gem_context *ctx, 1641 const struct drm_i915_gem_context_param *args) 1642 { 1643 struct i915_context_param_engines __user *user = 1644 u64_to_user_ptr(args->value); 1645 struct set_engines set = { .ctx = ctx }; 1646 unsigned int num_engines, n; 1647 u64 extensions; 1648 int err; 1649 1650 if (!args->size) { /* switch back to legacy user_ring_map */ 1651 if (!i915_gem_context_user_engines(ctx)) 1652 return 0; 1653 1654 set.engines = default_engines(ctx); 1655 if (IS_ERR(set.engines)) 1656 return PTR_ERR(set.engines); 1657 1658 goto replace; 1659 } 1660 1661 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines))); 1662 if (args->size < sizeof(*user) || 1663 !IS_ALIGNED(args->size, sizeof(*user->engines))) { 1664 DRM_DEBUG("Invalid size for engine array: %d\n", 1665 args->size); 1666 return -EINVAL; 1667 } 1668 1669 /* 1670 * Note that I915_EXEC_RING_MASK limits execbuf to only using the 1671 * first 64 engines defined here. 
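	 *
	 * For illustration (hypothetical userspace usage with error handling
	 * elided; ctx_id and fd are assumed to exist), a two-engine map of
	 * rcs0 + vcs0 could be installed with:
	 *
	 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
	 *		.engines = {
	 *			{ .engine_class = I915_ENGINE_CLASS_RENDER,
	 *			  .engine_instance = 0 },
	 *			{ .engine_class = I915_ENGINE_CLASS_VIDEO,
	 *			  .engine_instance = 0 },
	 *		},
	 *	};
	 *	struct drm_i915_gem_context_param p = {
	 *		.ctx_id = ctx_id,
	 *		.param = I915_CONTEXT_PARAM_ENGINES,
	 *		.size = sizeof(engines),
	 *		.value = (__u64)(uintptr_t)&engines,
	 *	};
	 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);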
1672 */ 1673 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); 1674 1675 set.engines = kmalloc(struct_size(set.engines, engines, num_engines), 1676 GFP_KERNEL); 1677 if (!set.engines) 1678 return -ENOMEM; 1679 1680 init_rcu_head(&set.engines->rcu); 1681 for (n = 0; n < num_engines; n++) { 1682 struct i915_engine_class_instance ci; 1683 struct intel_engine_cs *engine; 1684 struct intel_context *ce; 1685 1686 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { 1687 __free_engines(set.engines, n); 1688 return -EFAULT; 1689 } 1690 1691 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && 1692 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) { 1693 set.engines->engines[n] = NULL; 1694 continue; 1695 } 1696 1697 engine = intel_engine_lookup_user(ctx->i915, 1698 ci.engine_class, 1699 ci.engine_instance); 1700 if (!engine) { 1701 DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n", 1702 n, ci.engine_class, ci.engine_instance); 1703 __free_engines(set.engines, n); 1704 return -ENOENT; 1705 } 1706 1707 ce = intel_context_create(engine); 1708 if (IS_ERR(ce)) { 1709 __free_engines(set.engines, n); 1710 return PTR_ERR(ce); 1711 } 1712 1713 intel_context_set_gem(ce, ctx); 1714 1715 set.engines->engines[n] = ce; 1716 } 1717 set.engines->num_engines = num_engines; 1718 1719 err = -EFAULT; 1720 if (!get_user(extensions, &user->extensions)) 1721 err = i915_user_extensions(u64_to_user_ptr(extensions), 1722 set_engines__extensions, 1723 ARRAY_SIZE(set_engines__extensions), 1724 &set); 1725 if (err) { 1726 free_engines(set.engines); 1727 return err; 1728 } 1729 1730 replace: 1731 mutex_lock(&ctx->engines_mutex); 1732 if (args->size) 1733 i915_gem_context_set_user_engines(ctx); 1734 else 1735 i915_gem_context_clear_user_engines(ctx); 1736 set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1); 1737 mutex_unlock(&ctx->engines_mutex); 1738 1739 call_rcu(&set.engines->rcu, free_engines_rcu); 1740 1741 return 0; 1742 } 1743 1744 static struct i915_gem_engines * 1745 __copy_engines(struct i915_gem_engines *e) 1746 { 1747 struct i915_gem_engines *copy; 1748 unsigned int n; 1749 1750 copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); 1751 if (!copy) 1752 return ERR_PTR(-ENOMEM); 1753 1754 init_rcu_head(©->rcu); 1755 for (n = 0; n < e->num_engines; n++) { 1756 if (e->engines[n]) 1757 copy->engines[n] = intel_context_get(e->engines[n]); 1758 else 1759 copy->engines[n] = NULL; 1760 } 1761 copy->num_engines = n; 1762 1763 return copy; 1764 } 1765 1766 static int 1767 get_engines(struct i915_gem_context *ctx, 1768 struct drm_i915_gem_context_param *args) 1769 { 1770 struct i915_context_param_engines __user *user; 1771 struct i915_gem_engines *e; 1772 size_t n, count, size; 1773 int err = 0; 1774 1775 err = mutex_lock_interruptible(&ctx->engines_mutex); 1776 if (err) 1777 return err; 1778 1779 e = NULL; 1780 if (i915_gem_context_user_engines(ctx)) 1781 e = __copy_engines(i915_gem_context_engines(ctx)); 1782 mutex_unlock(&ctx->engines_mutex); 1783 if (IS_ERR_OR_NULL(e)) { 1784 args->size = 0; 1785 return PTR_ERR_OR_ZERO(e); 1786 } 1787 1788 count = e->num_engines; 1789 1790 /* Be paranoid in case we have an impedance mismatch */ 1791 if (!check_struct_size(user, engines, count, &size)) { 1792 err = -EINVAL; 1793 goto err_free; 1794 } 1795 if (overflows_type(size, args->size)) { 1796 err = -EINVAL; 1797 goto err_free; 1798 } 1799 1800 if (!args->size) { 1801 args->size = size; 1802 goto err_free; 1803 } 1804 1805 if (args->size < size) { 1806 err = 
-EINVAL; 1807 goto err_free; 1808 } 1809 1810 user = u64_to_user_ptr(args->value); 1811 if (!access_ok(user, size)) { 1812 err = -EFAULT; 1813 goto err_free; 1814 } 1815 1816 if (put_user(0, &user->extensions)) { 1817 err = -EFAULT; 1818 goto err_free; 1819 } 1820 1821 for (n = 0; n < count; n++) { 1822 struct i915_engine_class_instance ci = { 1823 .engine_class = I915_ENGINE_CLASS_INVALID, 1824 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE, 1825 }; 1826 1827 if (e->engines[n]) { 1828 ci.engine_class = e->engines[n]->engine->uabi_class; 1829 ci.engine_instance = e->engines[n]->engine->uabi_instance; 1830 } 1831 1832 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { 1833 err = -EFAULT; 1834 goto err_free; 1835 } 1836 } 1837 1838 args->size = size; 1839 1840 err_free: 1841 free_engines(e); 1842 return err; 1843 } 1844 1845 static int 1846 set_persistence(struct i915_gem_context *ctx, 1847 const struct drm_i915_gem_context_param *args) 1848 { 1849 if (args->size) 1850 return -EINVAL; 1851 1852 return __context_set_persistence(ctx, args->value); 1853 } 1854 1855 static void __apply_priority(struct intel_context *ce, void *arg) 1856 { 1857 struct i915_gem_context *ctx = arg; 1858 1859 if (!intel_engine_has_semaphores(ce->engine)) 1860 return; 1861 1862 if (ctx->sched.priority >= I915_PRIORITY_NORMAL) 1863 intel_context_set_use_semaphores(ce); 1864 else 1865 intel_context_clear_use_semaphores(ce); 1866 } 1867 1868 static int set_priority(struct i915_gem_context *ctx, 1869 const struct drm_i915_gem_context_param *args) 1870 { 1871 s64 priority = args->value; 1872 1873 if (args->size) 1874 return -EINVAL; 1875 1876 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) 1877 return -ENODEV; 1878 1879 if (priority > I915_CONTEXT_MAX_USER_PRIORITY || 1880 priority < I915_CONTEXT_MIN_USER_PRIORITY) 1881 return -EINVAL; 1882 1883 if (priority > I915_CONTEXT_DEFAULT_PRIORITY && 1884 !capable(CAP_SYS_NICE)) 1885 return -EPERM; 1886 1887 ctx->sched.priority = I915_USER_PRIORITY(priority); 1888 context_apply_all(ctx, __apply_priority, ctx); 1889 1890 return 0; 1891 } 1892 1893 static int ctx_setparam(struct drm_i915_file_private *fpriv, 1894 struct i915_gem_context *ctx, 1895 struct drm_i915_gem_context_param *args) 1896 { 1897 int ret = 0; 1898 1899 switch (args->param) { 1900 case I915_CONTEXT_PARAM_NO_ZEROMAP: 1901 if (args->size) 1902 ret = -EINVAL; 1903 else if (args->value) 1904 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 1905 else 1906 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 1907 break; 1908 1909 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 1910 if (args->size) 1911 ret = -EINVAL; 1912 else if (args->value) 1913 i915_gem_context_set_no_error_capture(ctx); 1914 else 1915 i915_gem_context_clear_no_error_capture(ctx); 1916 break; 1917 1918 case I915_CONTEXT_PARAM_BANNABLE: 1919 if (args->size) 1920 ret = -EINVAL; 1921 else if (!capable(CAP_SYS_ADMIN) && !args->value) 1922 ret = -EPERM; 1923 else if (args->value) 1924 i915_gem_context_set_bannable(ctx); 1925 else 1926 i915_gem_context_clear_bannable(ctx); 1927 break; 1928 1929 case I915_CONTEXT_PARAM_RECOVERABLE: 1930 if (args->size) 1931 ret = -EINVAL; 1932 else if (args->value) 1933 i915_gem_context_set_recoverable(ctx); 1934 else 1935 i915_gem_context_clear_recoverable(ctx); 1936 break; 1937 1938 case I915_CONTEXT_PARAM_PRIORITY: 1939 ret = set_priority(ctx, args); 1940 break; 1941 1942 case I915_CONTEXT_PARAM_SSEU: 1943 ret = set_sseu(ctx, args); 1944 break; 1945 1946 case I915_CONTEXT_PARAM_VM: 1947 ret = 
set_ppgtt(fpriv, ctx, args); 1948 break; 1949 1950 case I915_CONTEXT_PARAM_ENGINES: 1951 ret = set_engines(ctx, args); 1952 break; 1953 1954 case I915_CONTEXT_PARAM_PERSISTENCE: 1955 ret = set_persistence(ctx, args); 1956 break; 1957 1958 case I915_CONTEXT_PARAM_BAN_PERIOD: 1959 default: 1960 ret = -EINVAL; 1961 break; 1962 } 1963 1964 return ret; 1965 } 1966 1967 struct create_ext { 1968 struct i915_gem_context *ctx; 1969 struct drm_i915_file_private *fpriv; 1970 }; 1971 1972 static int create_setparam(struct i915_user_extension __user *ext, void *data) 1973 { 1974 struct drm_i915_gem_context_create_ext_setparam local; 1975 const struct create_ext *arg = data; 1976 1977 if (copy_from_user(&local, ext, sizeof(local))) 1978 return -EFAULT; 1979 1980 if (local.param.ctx_id) 1981 return -EINVAL; 1982 1983 return ctx_setparam(arg->fpriv, arg->ctx, &local.param); 1984 } 1985 1986 static int clone_engines(struct i915_gem_context *dst, 1987 struct i915_gem_context *src) 1988 { 1989 struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 1990 struct i915_gem_engines *clone; 1991 bool user_engines; 1992 unsigned long n; 1993 1994 clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); 1995 if (!clone) 1996 goto err_unlock; 1997 1998 init_rcu_head(&clone->rcu); 1999 for (n = 0; n < e->num_engines; n++) { 2000 struct intel_engine_cs *engine; 2001 2002 if (!e->engines[n]) { 2003 clone->engines[n] = NULL; 2004 continue; 2005 } 2006 engine = e->engines[n]->engine; 2007 2008 /* 2009 * Virtual engines are singletons; they can only exist 2010 * inside a single context, because they embed their 2011 * HW context... As each virtual context implies a single 2012 * timeline (each engine can only dequeue a single request 2013 * at any time), it would be surprising for two contexts 2014 * to use the same engine. So let's create a copy of 2015 * the virtual engine instead. 
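		 * (The copy is made with intel_execlists_clone_virtual() just
		 * below; a physical engine instead gets a fresh intel_context
		 * on the same engine via intel_context_create().)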
2016 */ 2017 if (intel_engine_is_virtual(engine)) 2018 clone->engines[n] = 2019 intel_execlists_clone_virtual(engine); 2020 else 2021 clone->engines[n] = intel_context_create(engine); 2022 if (IS_ERR_OR_NULL(clone->engines[n])) { 2023 __free_engines(clone, n); 2024 goto err_unlock; 2025 } 2026 2027 intel_context_set_gem(clone->engines[n], dst); 2028 } 2029 clone->num_engines = n; 2030 2031 user_engines = i915_gem_context_user_engines(src); 2032 i915_gem_context_unlock_engines(src); 2033 2034 /* Serialised by constructor */ 2035 free_engines(__context_engines_static(dst)); 2036 RCU_INIT_POINTER(dst->engines, clone); 2037 if (user_engines) 2038 i915_gem_context_set_user_engines(dst); 2039 else 2040 i915_gem_context_clear_user_engines(dst); 2041 return 0; 2042 2043 err_unlock: 2044 i915_gem_context_unlock_engines(src); 2045 return -ENOMEM; 2046 } 2047 2048 static int clone_flags(struct i915_gem_context *dst, 2049 struct i915_gem_context *src) 2050 { 2051 dst->user_flags = src->user_flags; 2052 return 0; 2053 } 2054 2055 static int clone_schedattr(struct i915_gem_context *dst, 2056 struct i915_gem_context *src) 2057 { 2058 dst->sched = src->sched; 2059 return 0; 2060 } 2061 2062 static int clone_sseu(struct i915_gem_context *dst, 2063 struct i915_gem_context *src) 2064 { 2065 struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 2066 struct i915_gem_engines *clone; 2067 unsigned long n; 2068 int err; 2069 2070 /* no locking required; sole access under constructor*/ 2071 clone = __context_engines_static(dst); 2072 if (e->num_engines != clone->num_engines) { 2073 err = -EINVAL; 2074 goto unlock; 2075 } 2076 2077 for (n = 0; n < e->num_engines; n++) { 2078 struct intel_context *ce = e->engines[n]; 2079 2080 if (clone->engines[n]->engine->class != ce->engine->class) { 2081 /* Must have compatible engine maps! 

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.ctx = i915_gem_create_context(i915, args->flags);
	if (IS_ERR(ext_data.ctx))
		return PTR_ERR(ext_data.ctx);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = id;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;

err_ctx:
	context_close(ext_data.ctx);
	return ret;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	context_close(ctx);
	return 0;
}
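
/*
 * A hypothetical userspace sketch (not part of the driver) of destroying a
 * context once it is no longer needed; fd and ctx_id are placeholders. Note
 * that ctx_id 0 names the per-file default context and cannot be destroyed,
 * which is why i915_gem_context_destroy_ioctl() returns -ENOENT for it.
 *
 *	struct drm_i915_gem_context_destroy destroy = {
 *		.ctx_id = ctx_id,	// non-zero id from context creation
 *		.pad = 0,		// must be zero or the ioctl fails
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */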

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}
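
/*
 * A hypothetical userspace sketch (not part of the driver) of querying the
 * SSEU configuration through get_sseu() above; fd and ctx_id are placeholders.
 * args.size must be either 0 (size query) or at least sizeof(user_sseu), and
 * args.value points at the drm_i915_gem_context_param_sseu block that selects
 * the engine and receives the result.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg) == 0)
 *		printf("slices: %llx\n", (unsigned long long)sseu.slice_mask);
 */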

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->size = 0;
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		rcu_read_lock();
		if (rcu_access_pointer(ctx->vm))
			args->value = rcu_dereference(ctx->vm)->total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		rcu_read_unlock();
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = get_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = ctx_setparam(file_priv, ctx, args);

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
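
/*
 * A hypothetical userspace sketch (not part of the driver) of polling the
 * per-context hang statistics via i915_gem_context_reset_stats_ioctl() above;
 * fd and ctx_id are placeholders. flags and pad must be zero, and reset_count
 * is only reported to CAP_SYS_ADMIN callers.
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0)
 *		printf("guilty %u, pending %u\n",
 *		       stats.batch_active, stats.batch_pending);
 */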
2439 */ 2440 2441 if (capable(CAP_SYS_ADMIN)) 2442 args->reset_count = i915_reset_count(&i915->gpu_error); 2443 else 2444 args->reset_count = 0; 2445 2446 args->batch_active = atomic_read(&ctx->guilty_count); 2447 args->batch_pending = atomic_read(&ctx->active_count); 2448 2449 ret = 0; 2450 out: 2451 rcu_read_unlock(); 2452 return ret; 2453 } 2454 2455 /* GEM context-engines iterator: for_each_gem_engine() */ 2456 struct intel_context * 2457 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) 2458 { 2459 const struct i915_gem_engines *e = it->engines; 2460 struct intel_context *ctx; 2461 2462 do { 2463 if (it->idx >= e->num_engines) 2464 return NULL; 2465 2466 ctx = e->engines[it->idx++]; 2467 } while (!ctx); 2468 2469 return ctx; 2470 } 2471 2472 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2473 #include "selftests/mock_context.c" 2474 #include "selftests/i915_gem_context.c" 2475 #endif 2476 2477 static void i915_global_gem_context_shrink(void) 2478 { 2479 kmem_cache_shrink(global.slab_luts); 2480 } 2481 2482 static void i915_global_gem_context_exit(void) 2483 { 2484 kmem_cache_destroy(global.slab_luts); 2485 } 2486 2487 static struct i915_global_gem_context global = { { 2488 .shrink = i915_global_gem_context_shrink, 2489 .exit = i915_global_gem_context_exit, 2490 } }; 2491 2492 int __init i915_global_gem_context_init(void) 2493 { 2494 global.slab_luts = KMEM_CACHE(i915_lut_handle, 0); 2495 if (!global.slab_luts) 2496 return -ENOMEM; 2497 2498 i915_global_register(&global.base); 2499 return 0; 2500 } 2501