/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * from RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current one to invoke a save of the context we actually care about. In fact,
 * the code could likely be constructed, albeit in a more complicated fashion,
 * to never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context, but it is still active.
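 *
 *  As a rough illustration of the table above (a sketch, not an exhaustive
 *  trace): a client that creates a context, submits one execbuf against it
 *  and then destroys it while the GPU is still using it walks
 *  S0->S1 (create), S1->S2 (execbuf, now current), S2->S4 (destroyed while
 *  still current) and finally S4->S5->S0 once the GPU has switched away and
 *  the object tracking retires the context BO.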
 *
 */

#include <linux/log2.h>
#include <linux/nospec.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h" /* virtual_engine */
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"

#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1

static struct i915_global_gem_context {
	struct i915_global base;
	struct kmem_cache *slab_luts;
} global;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	return kmem_cache_free(global.slab_luts, lut);
}

static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	mutex_lock(&ctx->lut_mutex);
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_lock(&obj->lut_lock);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);

		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			i915_vma_close(vma);
			i915_gem_object_put(obj);
		}

		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
	mutex_unlock(&ctx->lut_mutex);
}

static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}

static struct i915_address_space *
context_get_vm_rcu(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!rcu_access_pointer(ctx->vm));

	do {
		struct i915_address_space *vm;

		/*
		 * We do not allow downgrading from full-ppgtt [to a shared
		 * global gtt], so ctx->vm cannot become NULL.
		 */
		vm = rcu_dereference(ctx->vm);
		if (!kref_get_unless_zero(&vm->ref))
			continue;

		/*
		 * This ppgtt may have been reallocated between
		 * the read and the kref, and reassigned to a third
		 * context. In order to avoid inadvertent sharing
		 * of this ppgtt with that third context (and not
		 * src), we have to confirm that we have the same
		 * ppgtt after passing through the strong memory
		 * barrier implied by a successful
		 * kref_get_unless_zero().
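		 * (This is the usual RCU lookup-and-retry pattern: take a
		 * reference, re-read the pointer, and loop again if it no
		 * longer matches what we dereferenced.)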
		 *
		 * Once we have acquired the current ppgtt of ctx,
		 * we no longer care if it is released from ctx, as
		 * it cannot be reallocated elsewhere.
		 */

		if (vm == rcu_access_pointer(ctx->vm))
			return rcu_pointer_handoff(vm);

		i915_vm_put(vm);
	} while (1);
}

static void intel_context_set_gem(struct intel_context *ce,
				  struct i915_gem_context *ctx)
{
	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);

	ce->ring_size = SZ_16K;

	if (rcu_access_pointer(ctx->vm)) {
		struct i915_address_space *vm;

		rcu_read_lock();
		vm = context_get_vm_rcu(ctx); /* hmm */
		rcu_read_unlock();

		i915_vm_put(ce->vm);
		ce->vm = vm;
	}

	GEM_BUG_ON(ce->timeline);
	if (ctx->timeline)
		ce->timeline = intel_timeline_get(ctx->timeline);

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_timeslices(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);

	if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
	    ctx->i915->params.request_timeout_ms) {
		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;

		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
	}
}

static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		if (!e->engines[count])
			continue;

		intel_context_put(e->engines[count]);
	}
	kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}

static void free_engines_rcu(struct rcu_head *rcu)
{
	struct i915_gem_engines *engines =
		container_of(rcu, struct i915_gem_engines, rcu);

	i915_sw_fence_fini(&engines->fence);
	free_engines(engines);
}

static int __i915_sw_fence_call
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_gem_engines *engines =
		container_of(fence, typeof(*engines), fence);

	switch (state) {
	case FENCE_COMPLETE:
		if (!list_empty(&engines->link)) {
			struct i915_gem_context *ctx = engines->ctx;
			unsigned long flags;

			spin_lock_irqsave(&ctx->stale.lock, flags);
			list_del(&engines->link);
			spin_unlock_irqrestore(&ctx->stale.lock, flags);
		}
		i915_gem_context_put(engines->ctx);
		break;

	case FENCE_FREE:
		init_rcu_head(&engines->rcu);
		call_rcu(&engines->rcu, free_engines_rcu);
		break;
	}

	return NOTIFY_DONE;
}

static struct i915_gem_engines *alloc_engines(unsigned int count)
{
	struct i915_gem_engines *e;

	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
	if (!e)
		return NULL;

	i915_sw_fence_init(&e->fence, engines_notify);
	return e;
}

static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
	const struct intel_gt *gt = &ctx->i915->gt;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e;
	enum intel_engine_id id;

	e = alloc_engines(I915_NUM_ENGINES);
	if (!e)
		return ERR_PTR(-ENOMEM);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			__free_engines(e, e->num_engines + 1);
			return ERR_CAST(ce);
		}

		intel_context_set_gem(ce, ctx);

		e->engines[engine->legacy_idx] = ce;
		e->num_engines = max(e->num_engines, engine->legacy_idx);
	}
	e->num_engines++;

	return e;
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	if (ctx->timeline)
		intel_timeline_put(ctx->timeline);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);

	kfree_rcu(ctx, rcu);
}

static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines, true);
}

static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}

static bool __cancel_engine(struct intel_engine_cs *engine)
{
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset). As we have marked our context
	 * as banned, any incomplete request, including any running, will
	 * be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fall back to doing a local reset
	 * instead.
	 */
	return intel_engine_pulse(engine) == 0;
}

static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	if (intel_context_has_inflight(ce))
		return intel_context_inflight(ce);

	if (!ce->timeline)
		return NULL;

	/*
	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
	 * to the request to prevent it being transferred to a new timeline
	 * (and onto a new timeline->requests list).
	 */
	rcu_read_lock();
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		bool found;

		/* timeline is already completed up to this point? */
		if (!i915_request_get_rcu(rq))
			break;

		/* Check with the backend if the request is inflight */
		found = true;
		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
			found = i915_request_active_engine(rq, &engine);

		i915_request_put(rq);
		if (found)
			break;
	}
	rcu_read_unlock();

	return engine;
}

static void kill_engines(struct i915_gem_engines *engines, bool ban)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
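	 *
	 * (For example, a context whose user map contains a wide virtual
	 * engine may currently be executing on any one of its siblings;
	 * active_engine() below reports which physical engine, if any, is
	 * actually running it.)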
	 */
	for_each_gem_engine(ce, engines, it) {
		struct intel_engine_cs *engine;

		if (ban && intel_context_set_banned(ce))
			continue;

		/*
		 * Check the current active state of this context; if we
		 * are currently executing on the GPU we need to evict
		 * ourselves. On the other hand, if we haven't yet been
		 * submitted to the GPU or if everything is complete,
		 * we have nothing to do.
		 */
		engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
		if (engine && !__cancel_engine(engine) && ban)
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a full
			 * reset. We hope the collateral damage is worth it.
			 */
			__reset_context(engines->ctx, engine);
	}
}

static void kill_context(struct i915_gem_context *ctx)
{
	bool ban = (!i915_gem_context_is_persistent(ctx) ||
		    !ctx->i915->params.enable_hangcheck);
	struct i915_gem_engines *pos, *next;

	spin_lock_irq(&ctx->stale.lock);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
		if (!i915_sw_fence_await(&pos->fence)) {
			list_del_init(&pos->link);
			continue;
		}

		spin_unlock_irq(&ctx->stale.lock);

		kill_engines(pos, ban);

		spin_lock_irq(&ctx->stale.lock);
		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
		list_safe_reset_next(pos, next, link);
		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */

		i915_sw_fence_complete(&pos->fence);
	}
	spin_unlock_irq(&ctx->stale.lock);
}

static void engines_idle_release(struct i915_gem_context *ctx,
				 struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	INIT_LIST_HEAD(&engines->link);

	engines->ctx = i915_gem_context_get(ctx);

	for_each_gem_engine(ce, engines, it) {
		int err;

		/* serialises with execbuf */
		set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
		if (!intel_context_pin_if_active(ce))
			continue;

		/* Wait until context is finally scheduled out and retired */
		err = i915_sw_fence_await_active(&engines->fence,
						 &ce->active,
						 I915_ACTIVE_AWAIT_BARRIER);
		intel_context_unpin(ce);
		if (err)
			goto kill;
	}

	spin_lock_irq(&ctx->stale.lock);
	if (!i915_gem_context_is_closed(ctx))
		list_add_tail(&engines->link, &ctx->stale.engines);
	spin_unlock_irq(&ctx->stale.lock);

kill:
	if (list_empty(&engines->link)) /* raced, already closed */
		kill_engines(engines, true);

	i915_sw_fence_commit(&engines->fence);
}

static void set_closed_name(struct i915_gem_context *ctx)
{
	char *s;

	/* Replace '[]' with '<>' to indicate closed in debug prints */

	s = strrchr(ctx->name, '[');
	if (!s)
		return;

	*s = '<';

	s = strchr(s + 1, ']');
	if (s)
		*s = '>';
}

static void context_close(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	/* Flush any concurrent set_engines() */
	mutex_lock(&ctx->engines_mutex);
	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
	i915_gem_context_set_closed(ctx);
	mutex_unlock(&ctx->engines_mutex);

	mutex_lock(&ctx->mutex);

	set_closed_name(ctx);

	vm = i915_gem_context_vm(ctx);
	if (vm)
		i915_vm_close(vm);

	ctx->file_priv = ERR_PTR(-EBADF);

	/*
	 * The LUT uses
	 * the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);

	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	mutex_unlock(&ctx->mutex);

	/*
	 * If the user has disabled hangchecking, we cannot be sure that
	 * the batches will ever complete after the context is closed,
	 * keeping the context and all resources pinned forever. So in this
	 * case we opt to forcibly kill off all remaining requests on
	 * context close.
	 */
	kill_context(ctx);

	i915_gem_context_put(ctx);
}

static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
{
	if (i915_gem_context_is_persistent(ctx) == state)
		return 0;

	if (state) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!ctx->i915->params.enable_hangcheck)
			return -EINVAL;

		i915_gem_context_set_persistence(ctx);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * clean up a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(&ctx->i915->gt))
			return -ENODEV;

		i915_gem_context_clear_persistence(ctx);
	}

	return 0;
}

static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	int err;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->i915 = i915;
	ctx->sched.priority = I915_PRIORITY_NORMAL;
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->link);

	spin_lock_init(&ctx->stale.lock);
	INIT_LIST_HEAD(&ctx->stale.engines);

	mutex_init(&ctx->engines_mutex);
	e = default_engines(ctx);
	if (IS_ERR(e)) {
		err = PTR_ERR(e);
		goto err_free;
	}
	RCU_INIT_POINTER(ctx->engines, e);

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	mutex_init(&ctx->lut_mutex);

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(i915);

	i915_gem_context_set_bannable(ctx);
	i915_gem_context_set_recoverable(ctx);
	__context_set_persistence(ctx, true /* cgroup hook? */);

	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(err);
}

static inline struct i915_gem_engines *
__context_engines_await(const struct i915_gem_context *ctx,
			bool *user_engines)
{
	struct i915_gem_engines *engines;

	rcu_read_lock();
	do {
		engines = rcu_dereference(ctx->engines);
		GEM_BUG_ON(!engines);

		if (user_engines)
			*user_engines = i915_gem_context_user_engines(ctx);

		/* successful await => strong mb */
		if (unlikely(!i915_sw_fence_await(&engines->fence)))
			continue;

		if (likely(engines == rcu_access_pointer(ctx->engines)))
			break;

		i915_sw_fence_complete(&engines->fence);
	} while (1);
	rcu_read_unlock();

	return engines;
}

static int
context_apply_all(struct i915_gem_context *ctx,
		  int (*fn)(struct intel_context *ce, void *data),
		  void *data)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_engines *e;
	struct intel_context *ce;
	int err = 0;

	e = __context_engines_await(ctx, NULL);
	for_each_gem_engine(ce, e, it) {
		err = fn(ce, data);
		if (err)
			break;
	}
	i915_sw_fence_complete(&e->fence);

	return err;
}

static int __apply_ppgtt(struct intel_context *ce, void *vm)
{
	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(vm);
	return 0;
}

static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
	struct i915_address_space *old;

	old = rcu_replace_pointer(ctx->vm,
				  i915_vm_open(vm),
				  lockdep_is_held(&ctx->mutex));
	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));

	context_apply_all(ctx, __apply_ppgtt, vm);

	return old;
}

static void __assign_ppgtt(struct i915_gem_context *ctx,
			   struct i915_address_space *vm)
{
	if (vm == rcu_access_pointer(ctx->vm))
		return;

	vm = __set_ppgtt(ctx, vm);
	if (vm)
		i915_vm_close(vm);
}

static void __set_timeline(struct intel_timeline **dst,
			   struct intel_timeline *src)
{
	struct intel_timeline *old = *dst;
	*dst = src ? intel_timeline_get(src) : NULL;

	if (old)
		intel_timeline_put(old);
}

static int __apply_timeline(struct intel_context *ce, void *timeline)
{
	__set_timeline(&ce->timeline, timeline);
	return 0;
}

static void __assign_timeline(struct i915_gem_context *ctx,
			      struct intel_timeline *timeline)
{
	__set_timeline(&ctx->timeline, timeline);
	context_apply_all(ctx, __apply_timeline, timeline);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
{
	struct i915_gem_context *ctx;

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
	    !HAS_EXECLISTS(i915))
		return ERR_PTR(-EINVAL);

	ctx = __create_context(i915);
	if (IS_ERR(ctx))
		return ctx;

	if (HAS_FULL_PPGTT(i915)) {
		struct i915_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(&i915->gt);
		if (IS_ERR(ppgtt)) {
			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
				PTR_ERR(ppgtt));
			context_close(ctx);
			return ERR_CAST(ppgtt);
		}

		mutex_lock(&ctx->mutex);
		__assign_ppgtt(ctx, &ppgtt->vm);
		mutex_unlock(&ctx->mutex);

		i915_vm_put(&ppgtt->vm);
	}

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
		struct intel_timeline *timeline;

		timeline = intel_timeline_create(&i915->gt);
		if (IS_ERR(timeline)) {
			context_close(ctx);
			return ERR_CAST(timeline);
		}

		__assign_timeline(ctx, timeline);
		intel_timeline_put(timeline);
	}

	trace_i915_context_create(ctx);

	return ctx;
}

static void init_contexts(struct i915_gem_contexts *gc)
{
	spin_lock_init(&gc->lock);
	INIT_LIST_HEAD(&gc->list);
}

void i915_gem_init__contexts(struct drm_i915_private *i915)
{
	init_contexts(&i915->gem.contexts);
}

static int gem_context_register(struct i915_gem_context *ctx,
				struct drm_i915_file_private *fpriv,
				u32 *id)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm;
	int ret;

	ctx->file_priv = fpriv;

	mutex_lock(&ctx->mutex);
	vm = i915_gem_context_vm(ctx);
	if (vm)
		WRITE_ONCE(vm->file, fpriv); /* XXX */
	mutex_unlock(&ctx->mutex);

	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
		 current->comm, pid_nr(ctx->pid));

	/* And finally expose ourselves to userspace via the idr */
	ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
	if (ret)
		goto err_pid;

	spin_lock(&i915->gem.contexts.lock);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	spin_unlock(&i915->gem.contexts.lock);

	return 0;

err_pid:
	put_pid(fetch_and_zero(&ctx->pid));
	return ret;
}

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int err;
	u32 id;

	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);

	/* 0 reserved for invalid/unassigned ppgtt */
	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);

	ctx = i915_gem_create_context(i915, 0);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err;
	}

	err = gem_context_register(ctx, file_priv, &id);
	if (err < 0)
		goto err_ctx;

	GEM_BUG_ON(id);
	return 0;

err_ctx:
	context_close(ctx);
err:
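	/*
	 * Nothing has been published to userspace yet on this path, so the
	 * freshly initialised xarrays are still empty and only need tearing
	 * back down.
	 */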
	xa_destroy(&file_priv->vm_xa);
	xa_destroy(&file_priv->context_xa);
	return err;
}

void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long idx;

	xa_for_each(&file_priv->context_xa, idx, ctx)
		context_close(ctx);
	xa_destroy(&file_priv->context_xa);

	xa_for_each(&file_priv->vm_xa, idx, vm)
		i915_vm_put(vm);
	xa_destroy(&file_priv->vm_xa);
}

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_vm_control *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_ppgtt *ppgtt;
	u32 id;
	int err;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	if (args->flags)
		return -EINVAL;

	ppgtt = i915_ppgtt_create(&i915->gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	ppgtt->vm.file = file_priv;

	if (args->extensions) {
		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   NULL, 0,
					   ppgtt);
		if (err)
			goto err_put;
	}

	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
		       xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_put;

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->vm_id = id;
	return 0;

err_put:
	i915_vm_put(&ppgtt->vm);
	return err;
}

int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_vm_control *args = data;
	struct i915_address_space *vm;

	if (args->flags)
		return -EINVAL;

	if (args->extensions)
		return -EINVAL;

	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
	if (!vm)
		return -ENOENT;

	i915_vm_put(vm);
	return 0;
}

struct context_barrier_task {
	struct i915_active base;
	void (*task)(void *data);
	void *data;
};

static void cb_retire(struct i915_active *base)
{
	struct context_barrier_task *cb = container_of(base, typeof(*cb), base);

	if (cb->task)
		cb->task(cb->data);

	i915_active_fini(&cb->base);
	kfree(cb);
}

I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
				intel_engine_mask_t engines,
				bool (*skip)(struct intel_context *ce, void *data),
				int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
				int (*emit)(struct i915_request *rq, void *data),
				void (*task)(void *data),
				void *data)
{
	struct context_barrier_task *cb;
	struct i915_gem_engines_iter it;
	struct i915_gem_engines *e;
	struct i915_gem_ww_ctx ww;
	struct intel_context *ce;
	int err = 0;

	GEM_BUG_ON(!task);

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return -ENOMEM;

	i915_active_init(&cb->base, NULL, cb_retire, 0);
	err = i915_active_acquire(&cb->base);
	if (err) {
		kfree(cb);
		return err;
	}

	e = __context_engines_await(ctx, NULL);
	if (!e) {
		i915_active_release(&cb->base);
		return -ENOENT;
	}

	for_each_gem_engine(ce, e, it) {
		struct i915_request *rq;
		if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
				       ce->engine->mask)) {
			err = -ENXIO;
			break;
		}

		if (!(ce->engine->mask & engines))
			continue;

		if (skip && skip(ce, data))
			continue;

		i915_gem_ww_ctx_init(&ww, true);
retry:
		err = intel_context_pin_ww(ce, &ww);
		if (err)
			goto err;

		if (pin)
			err = pin(ce, &ww, data);
		if (err)
			goto err_unpin;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = 0;
		if (emit)
			err = emit(rq, data);
		if (err == 0)
			err = i915_active_add_request(&cb->base, rq);

		i915_request_add(rq);
err_unpin:
		intel_context_unpin(ce);
err:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);

		if (err)
			break;
	}
	i915_sw_fence_complete(&e->fence);

	cb->task = err ? NULL : task; /* caller needs to unwind instead */
	cb->data = data;

	i915_active_release(&cb->base);

	return err;
}

static int get_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm;
	int err;
	u32 id;

	if (!rcu_access_pointer(ctx->vm))
		return -ENODEV;

	rcu_read_lock();
	vm = context_get_vm_rcu(ctx);
	rcu_read_unlock();
	if (!vm)
		return -ENODEV;

	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_put;

	i915_vm_open(vm);

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->value = id;
	args->size = 0;

err_put:
	i915_vm_put(vm);
	return err;
}

static void set_ppgtt_barrier(void *data)
{
	struct i915_address_space *old = data;

	if (GRAPHICS_VER(old->i915) < 8)
		gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));

	i915_vm_close(old);
}

static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
{
	struct i915_address_space *vm = ce->vm;

	if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
		/* ppGTT is not part of the legacy context image */
		return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);

	return 0;
}

static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
	struct i915_address_space *vm = rq->context->vm;
	struct intel_engine_cs *engine = rq->engine;
	u32 base = engine->mmio_base;
	u32 *cs;
	int i;

	if (i915_vm_is_4lvl(vm)) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
		const dma_addr_t pd_daddr = px_dma(ppgtt->pd);

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		*cs++ = MI_LOAD_REGISTER_IMM(2);

		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
		*cs++ = upper_32_bits(pd_daddr);
		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
		*cs++ = lower_32_bits(pd_daddr);

		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);
	} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
		int err;

		/* Magic required to prevent forcewake errors! */
		err = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (err)
			return err;

		cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
		for (i = GEN8_3LVL_PDPES; i--; ) {
			const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
			*cs++ = upper_32_bits(pd_daddr);
			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
			*cs++ = lower_32_bits(pd_daddr);
		}
		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);
	}

	return 0;
}

static bool skip_ppgtt_update(struct intel_context *ce, void *data)
{
	if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
		return !ce->state;
	else
		return !atomic_read(&ce->pin_count);
}

static int set_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm, *old;
	int err;

	if (args->size)
		return -EINVAL;

	if (!rcu_access_pointer(ctx->vm))
		return -ENODEV;

	if (upper_32_bits(args->value))
		return -ENOENT;

	rcu_read_lock();
	vm = xa_load(&file_priv->vm_xa, args->value);
	if (vm && !kref_get_unless_zero(&vm->ref))
		vm = NULL;
	rcu_read_unlock();
	if (!vm)
		return -ENOENT;

	err = mutex_lock_interruptible(&ctx->mutex);
	if (err)
		goto out;

	if (i915_gem_context_is_closed(ctx)) {
		err = -ENOENT;
		goto unlock;
	}

	if (vm == rcu_access_pointer(ctx->vm))
		goto unlock;

	old = __set_ppgtt(ctx, vm);

	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
	lut_close(ctx);

	/*
	 * We need to flush any requests using the current ppgtt before
	 * we release it as the requests do not hold a reference themselves,
	 * only indirectly through the context.
	 */
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_ppgtt_update,
				   pin_ppgtt_update,
				   emit_ppgtt_update,
				   set_ppgtt_barrier,
				   old);
	if (err) {
		i915_vm_close(__set_ppgtt(ctx, old));
		i915_vm_close(old);
		lut_close(ctx); /* force a rebuild of the old obj:vma cache */
	}

unlock:
	mutex_unlock(&ctx->mutex);
out:
	i915_vm_put(vm);
	return err;
}

int
i915_gem_user_to_context_sseu(struct intel_gt *gt,
			      const struct drm_i915_gem_context_param_sseu *user,
			      struct intel_sseu *context)
{
	const struct sseu_dev_info *device = &gt->info.sseu;
	struct drm_i915_private *i915 = gt->i915;

	/* No zeros in any field. */
	if (!user->slice_mask || !user->subslice_mask ||
	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
		return -EINVAL;

	/* Max > min. */
	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
		return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
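	 * (E.g. the uAPI masks are 64-bit wide while, at the time of writing,
	 * struct intel_sseu stores them in much narrower fields, hence the
	 * overflows_type() checks below.)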
	 */
	if (overflows_type(user->slice_mask, context->slice_mask) ||
	    overflows_type(user->subslice_mask, context->subslice_mask) ||
	    overflows_type(user->min_eus_per_subslice,
			   context->min_eus_per_subslice) ||
	    overflows_type(user->max_eus_per_subslice,
			   context->max_eus_per_subslice))
		return -EINVAL;

	/* Check validity against hardware. */
	if (user->slice_mask & ~device->slice_mask)
		return -EINVAL;

	if (user->subslice_mask & ~device->subslice_mask[0])
		return -EINVAL;

	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
		return -EINVAL;

	context->slice_mask = user->slice_mask;
	context->subslice_mask = user->subslice_mask;
	context->min_eus_per_subslice = user->min_eus_per_subslice;
	context->max_eus_per_subslice = user->max_eus_per_subslice;

	/* Part specific restrictions. */
	if (GRAPHICS_VER(i915) == 11) {
		unsigned int hw_s = hweight8(device->slice_mask);
		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
		unsigned int req_s = hweight8(context->slice_mask);
		unsigned int req_ss = hweight8(context->subslice_mask);

		/*
		 * Only full subslice enablement is possible if more than one
		 * slice is turned on.
		 */
		if (req_s > 1 && req_ss != hw_ss_per_s)
			return -EINVAL;

		/*
		 * If more than four (SScount bitfield limit) subslices are
		 * requested then the number has to be even.
		 */
		if (req_ss > 4 && (req_ss & 1))
			return -EINVAL;

		/*
		 * If only one slice is enabled and subslice count is below the
		 * device full enablement, it must be at most half of all the
		 * available subslices.
		 */
		if (req_s == 1 && req_ss < hw_ss_per_s &&
		    req_ss > (hw_ss_per_s / 2))
			return -EINVAL;

		/* ABI restriction - VME use case only. */

		/* All slices or one slice only. */
		if (req_s != 1 && req_s != hw_s)
			return -EINVAL;

		/*
		 * Half subslices or full enablement only when one slice is
		 * enabled.
		 */
		if (req_s == 1 &&
		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
			return -EINVAL;

		/* No EU configuration changes. */
		if ((user->min_eus_per_subslice !=
		     device->max_eus_per_subslice) ||
		    (user->max_eus_per_subslice !=
		     device->max_eus_per_subslice))
			return -EINVAL;
	}

	return 0;
}

static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	struct intel_sseu sseu;
	unsigned long lookup;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (GRAPHICS_VER(i915) != 11)
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Only render engine supports RPCS configuration. */
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}

struct set_engines {
	struct i915_gem_context *ctx;
	struct i915_gem_engines *engines;
};

static int
set_engines__load_balance(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct intel_engine_cs *stack[16];
	struct intel_engine_cs **siblings;
	struct intel_context *ce;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (intel_uc_uses_guc_submission(&i915->gt.uc))
		return -ENODEV; /* not implemented yet */

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->engines->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->engines->num_engines);
	if (set->engines->engines[idx]) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	siblings = stack;
	if (num_siblings > ARRAY_SIZE(stack)) {
		siblings = kmalloc_array(num_siblings,
					 sizeof(*siblings),
					 GFP_KERNEL);
		if (!siblings)
			return -ENOMEM;
	}

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto out_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto out_siblings;
		}
	}

	ce = intel_execlists_create_virtual(siblings, n);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out_siblings;
	}

	intel_context_set_gem(ce, set->ctx);

	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
		intel_context_put(ce);
		err = -EEXIST;
		goto out_siblings;
	}

out_siblings:
	if (siblings != stack)
		kfree(siblings);

	return err;
}

static int
set_engines__bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *virtual;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
			idx, set->engines->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->engines->num_engines);
	if (!set->engines->engines[idx]) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}
	virtual = set->engines->engines[idx]->engine;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(i915,
					  ci.engine_class, ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (get_user(num_bonds, &ext->num_bonds))
		return -EFAULT;

	for (n = 0; n < num_bonds; n++) {
		struct intel_engine_cs *bond;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			drm_dbg(&i915->drm,
				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
				n, ci.engine_class, ci.engine_instance);
			return -EINVAL;
		}

		/*
		 * A non-virtual engine has no siblings to choose between; and
		 * a submit fence will always be directed to the one engine.
		 */
		if (intel_engine_is_virtual(virtual)) {
			err = intel_virtual_engine_attach_bond(virtual,
							       master,
							       bond);
			if (err)
				return err;
		}
	}

	return 0;
}

static const i915_user_extension_fn set_engines__extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
};

static int
set_engines(struct i915_gem_context *ctx,
	    const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_context_param_engines __user *user =
		u64_to_user_ptr(args->value);
	struct set_engines set = { .ctx = ctx };
	unsigned int num_engines, n;
	u64 extensions;
	int err;

	if (!args->size) { /* switch back to legacy user_ring_map */
		if (!i915_gem_context_user_engines(ctx))
			return 0;

		set.engines = default_engines(ctx);
		if (IS_ERR(set.engines))
			return PTR_ERR(set.engines);

		goto replace;
	}

	BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
	if (args->size < sizeof(*user) ||
	    !IS_ALIGNED(args->size, sizeof(*user->engines))) {
		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
			args->size);
		return -EINVAL;
	}

	/*
	 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
	 * first 64 engines defined here.
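	 *
	 * A rough sketch of the expected userspace layout (assuming the uAPI
	 * helpers from include/uapi/drm/i915_drm.h; the names below are
	 * illustrative only):
	 *
	 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
	 *		.engines = {
	 *			{ I915_ENGINE_CLASS_RENDER, 0 },
	 *			{ I915_ENGINE_CLASS_COPY, 0 },
	 *		},
	 *	};
	 *	struct drm_i915_gem_context_param param = {
	 *		.ctx_id = ctx_id,
	 *		.param = I915_CONTEXT_PARAM_ENGINES,
	 *		.size = sizeof(engines),
	 *		.value = (uintptr_t)&engines,
	 *	};
	 *
	 * passed to DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, after which execbuf
	 * indices 0 and 1 for this context would map to rcs0 and bcs0.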
	 */
	num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
	set.engines = alloc_engines(num_engines);
	if (!set.engines)
		return -ENOMEM;

	for (n = 0; n < num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;
		struct intel_context *ce;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			__free_engines(set.engines, n);
			return -EFAULT;
		}

		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
			set.engines->engines[n] = NULL;
			continue;
		}

		engine = intel_engine_lookup_user(ctx->i915,
						  ci.engine_class,
						  ci.engine_instance);
		if (!engine) {
			drm_dbg(&i915->drm,
				"Invalid engine[%d]: { class:%d, instance:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			__free_engines(set.engines, n);
			return -ENOENT;
		}

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			__free_engines(set.engines, n);
			return PTR_ERR(ce);
		}

		intel_context_set_gem(ce, ctx);

		set.engines->engines[n] = ce;
	}
	set.engines->num_engines = num_engines;

	err = -EFAULT;
	if (!get_user(extensions, &user->extensions))
		err = i915_user_extensions(u64_to_user_ptr(extensions),
					   set_engines__extensions,
					   ARRAY_SIZE(set_engines__extensions),
					   &set);
	if (err) {
		free_engines(set.engines);
		return err;
	}

replace:
	mutex_lock(&ctx->engines_mutex);
	if (i915_gem_context_is_closed(ctx)) {
		mutex_unlock(&ctx->engines_mutex);
		free_engines(set.engines);
		return -ENOENT;
	}
	if (args->size)
		i915_gem_context_set_user_engines(ctx);
	else
		i915_gem_context_clear_user_engines(ctx);
	set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
	mutex_unlock(&ctx->engines_mutex);

	/* Keep track of old engine sets for kill_context() */
	engines_idle_release(ctx, set.engines);

	return 0;
}

static int
get_engines(struct i915_gem_context *ctx,
	    struct drm_i915_gem_context_param *args)
{
	struct i915_context_param_engines __user *user;
	struct i915_gem_engines *e;
	size_t n, count, size;
	bool user_engines;
	int err = 0;

	e = __context_engines_await(ctx, &user_engines);
	if (!e)
		return -ENOENT;

	if (!user_engines) {
		i915_sw_fence_complete(&e->fence);
		args->size = 0;
		return 0;
	}

	count = e->num_engines;

	/* Be paranoid in case we have an impedance mismatch */
	if (!check_struct_size(user, engines, count, &size)) {
		err = -EINVAL;
		goto err_free;
	}
	if (overflows_type(size, args->size)) {
		err = -EINVAL;
		goto err_free;
	}

	if (!args->size) {
		args->size = size;
		goto err_free;
	}

	if (args->size < size) {
		err = -EINVAL;
		goto err_free;
	}

	user = u64_to_user_ptr(args->value);
	if (put_user(0, &user->extensions)) {
		err = -EFAULT;
		goto err_free;
	}

	for (n = 0; n < count; n++) {
		struct i915_engine_class_instance ci = {
			.engine_class = I915_ENGINE_CLASS_INVALID,
			.engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
		};

		if (e->engines[n]) {
			ci.engine_class = e->engines[n]->engine->uabi_class;
			ci.engine_instance = e->engines[n]->engine->uabi_instance;
		}

		if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
			err = -EFAULT;
			goto err_free;
		}
	}

	args->size = size;

err_free:
	i915_sw_fence_complete(&e->fence);
	return err;
}

static int
set_persistence(struct i915_gem_context *ctx,
		const struct drm_i915_gem_context_param *args)
{
	if (args->size)
		return -EINVAL;

	return __context_set_persistence(ctx, args->value);
}

static int __apply_priority(struct intel_context *ce, void *arg)
{
	struct i915_gem_context *ctx = arg;

	if (!intel_engine_has_timeslices(ce->engine))
		return 0;

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
		intel_context_set_use_semaphores(ce);
	else
		intel_context_clear_use_semaphores(ce);

	return 0;
}

static int set_priority(struct i915_gem_context *ctx,
			const struct drm_i915_gem_context_param *args)
{
	s64 priority = args->value;

	if (args->size)
		return -EINVAL;

	if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
		return -ENODEV;

	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
		return -EINVAL;

	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	ctx->sched.priority = priority;
	context_apply_all(ctx, __apply_priority, ctx);

	return 0;
}

static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_recoverable(ctx);
		else
			i915_gem_context_clear_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = set_priority(ctx, args);
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_ppgtt(fpriv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		ret = set_persistence(ctx, args);
		break;

	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

struct create_ext {
	struct i915_gem_context *ctx;
	struct drm_i915_file_private *fpriv;
};

static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	if (local.param.ctx_id)
		return -EINVAL;

	return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}

static int clone_engines(struct i915_gem_context *dst,
			 struct i915_gem_context *src)
{
	struct i915_gem_engines *clone, *e;
	bool user_engines;
	unsigned long n;

	e = __context_engines_await(src, &user_engines);
	if (!e)
		return -ENOENT;

	clone = alloc_engines(e->num_engines);
	if (!clone)
		goto err_unlock;

	for (n = 0; n < e->num_engines; n++) {
		struct intel_engine_cs *engine;

		if (!e->engines[n]) {
			clone->engines[n] = NULL;
			continue;
		}
		engine = e->engines[n]->engine;

		/*
		 * Virtual engines are singletons; they can only exist
		 * inside a single context, because they embed their
		 * HW context... As each virtual context implies a single
		 * timeline (each engine can only dequeue a single request
		 * at any time), it would be surprising for two contexts
		 * to use the same engine. So let's create a copy of
		 * the virtual engine instead.
		 */
		if (intel_engine_is_virtual(engine))
			clone->engines[n] =
				intel_execlists_clone_virtual(engine);
		else
			clone->engines[n] = intel_context_create(engine);
		if (IS_ERR_OR_NULL(clone->engines[n])) {
			__free_engines(clone, n);
			goto err_unlock;
		}

		intel_context_set_gem(clone->engines[n], dst);
	}
	clone->num_engines = n;
	i915_sw_fence_complete(&e->fence);

	/* Serialised by constructor */
	engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
	if (user_engines)
		i915_gem_context_set_user_engines(dst);
	else
		i915_gem_context_clear_user_engines(dst);
	return 0;

err_unlock:
	i915_sw_fence_complete(&e->fence);
	return -ENOMEM;
}

static int clone_flags(struct i915_gem_context *dst,
		       struct i915_gem_context *src)
{
	dst->user_flags = src->user_flags;
	return 0;
}

static int clone_schedattr(struct i915_gem_context *dst,
			   struct i915_gem_context *src)
{
	dst->sched = src->sched;
	return 0;
}

static int clone_sseu(struct i915_gem_context *dst,
		      struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	unsigned long n;
	int err;

	/* no locking required; sole access under constructor */
	clone = __context_engines_static(dst);
	if (e->num_engines != clone->num_engines) {
		err = -EINVAL;
		goto unlock;
	}

	for (n = 0; n < e->num_engines; n++) {
		struct intel_context *ce = e->engines[n];

		if (clone->engines[n]->engine->class != ce->engine->class) {
			/* Must have compatible engine maps! */
			err = -EINVAL;
			goto unlock;
		}

		/* serialises with set_sseu */
		err = intel_context_lock_pinned(ce);
		if (err)
			goto unlock;

		clone->engines[n]->sseu = ce->sseu;
		intel_context_unlock_pinned(ce);
	}

	err = 0;
unlock:
	i915_gem_context_unlock_engines(src);
	return err;
}

static int clone_timeline(struct i915_gem_context *dst,
			  struct i915_gem_context *src)
{
	if (src->timeline)
		__assign_timeline(dst, src->timeline);

	return 0;
}

static int clone_vm(struct i915_gem_context *dst,
		    struct i915_gem_context *src)
{
	struct i915_address_space *vm;
	int err = 0;

	if (!rcu_access_pointer(src->vm))
		return 0;

	rcu_read_lock();
	vm = context_get_vm_rcu(src);
	rcu_read_unlock();

	if (!mutex_lock_interruptible(&dst->mutex)) {
		__assign_ppgtt(dst, vm);
		mutex_unlock(&dst->mutex);
	} else {
		err = -EINTR;
	}

	i915_vm_put(vm);
	return err;
}

static int create_clone(struct i915_user_extension __user *ext, void *data)
{
	static int (* const fn[])(struct i915_gem_context *dst,
				  struct i915_gem_context *src) = {
#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
		MAP(ENGINES, clone_engines),
		MAP(FLAGS, clone_flags),
		MAP(SCHEDATTR, clone_schedattr),
		MAP(SSEU, clone_sseu),
		MAP(TIMELINE, clone_timeline),
		MAP(VM, clone_vm),
#undef MAP
	};
	struct drm_i915_gem_context_create_ext_clone local;
	const struct create_ext *arg = data;
	struct i915_gem_context *dst = arg->ctx;
	struct i915_gem_context *src;
	int err, bit;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
		     I915_CONTEXT_CLONE_UNKNOWN);

	if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
		return -EINVAL;

	if (local.rsvd)
		return -EINVAL;

	rcu_read_lock();
	src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
	rcu_read_unlock();
	if (!src)
		return -ENOENT;

	GEM_BUG_ON(src == dst);

	for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
		if (!(local.flags & BIT(bit)))
			continue;

		err = fn[bit](dst, src);
		if (err)
			return err;
	}

	return 0;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.ctx = i915_gem_create_context(i915, args->flags);
	if (IS_ERR(ext_data.ctx))
		return PTR_ERR(ext_data.ctx);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = id;
	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);

	return 0;

err_ctx:
	context_close(ext_data.ctx);
	return ret;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	context_close(ctx);
	return 0;
}

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}
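/*
 * Illustrative sketch (userspace side, not driver code) of querying the SSEU
 * state served by get_sseu() above through the getparam ioctl below; "fd" and
 * "ctx_id" are placeholders and error handling is omitted:
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { .engine_class = I915_ENGINE_CLASS_RENDER },
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (__u64)(uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *	// sseu.slice_mask / sseu.subslice_mask now reflect the context state
 */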
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		rcu_read_lock();
		if (rcu_access_pointer(ctx->vm))
			args->value = rcu_dereference(ctx->vm)->total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		rcu_read_unlock();
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = get_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = ctx_setparam(file_priv, ctx, args);

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * while we are querying its stats. If we need that extra layer of
	 * protection, we should wrap the hangstats with a seqlock.
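	 *
	 * (Sketch only, not something the driver currently does: such a
	 * seqlock would take write_seqlock() around the guilty/active
	 * updates and loop on read_seqbegin()/read_seqretry() around the
	 * reads below.)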
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	if (unlikely(!e))
		return NULL;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

static void i915_global_gem_context_shrink(void)
{
	kmem_cache_shrink(global.slab_luts);
}

static void i915_global_gem_context_exit(void)
{
	kmem_cache_destroy(global.slab_luts);
}

static struct i915_global_gem_context global = { {
	.shrink = i915_global_gem_context_shrink,
	.exit = i915_global_gem_context_exit,
} };

int __init i915_global_gem_context_init(void)
{
	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!global.slab_luts)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}
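/*
 * Illustrative sketch of walking a context's engines with the iterator
 * defined above, following the pattern used elsewhere in the driver; "ctx"
 * and the loop body are placeholders:
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *		// inspect or touch each populated engine slot via ce
 *	}
 *	i915_gem_context_unlock_engines(ctx);
 */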