/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU
 * enters and exits RC6 (the GPU has its own internal power context, except on
 * gen5). Though something like a context does exist for the media ring, the
 * code only supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current context, to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits an execbuf with the context
 * S2->S3: another execbuf is submitted with a different context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf with the context
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context, but it is still active.
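 *
 * Roughly, userspace drives the lifecycle above through the ioctls
 * implemented in this file. An illustrative sketch only (error handling and
 * most fields omitted), not verbatim usage:
 *
 *	struct drm_i915_gem_context_create_ext create = {};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *					// S0->S1
 *	execbuf.rsvd1 = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *					// S1->S2 (and S3->S2 on later submissions)
 *	struct drm_i915_gem_context_destroy destroy = {
 *		.ctx_id = create.ctx_id,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 *					// S2->S4 / S3->S5, then ->S0 on retire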
 *
 */

#include <linux/log2.h>
#include <linux/nospec.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_ring.h"

#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

static struct i915_global_gem_context {
	struct i915_global base;
	struct kmem_cache *slab_luts;
} global;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	kmem_cache_free(global.slab_luts, lut);
}

static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	mutex_lock(&ctx->lut_mutex);
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_lock(&obj->lut_lock);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);

		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			i915_vma_close(vma);
			i915_gem_object_put(obj);
		}

		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
	mutex_unlock(&ctx->lut_mutex);
}

static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}

static struct i915_address_space *
context_get_vm_rcu(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!rcu_access_pointer(ctx->vm));

	do {
		struct i915_address_space *vm;

		/*
		 * We do not allow downgrading from full-ppgtt [to a shared
		 * global gtt], so ctx->vm cannot become NULL.
		 */
		vm = rcu_dereference(ctx->vm);
		if (!kref_get_unless_zero(&vm->ref))
			continue;

		/*
		 * This ppgtt may have been reallocated between
		 * the read and the kref, and reassigned to a third
		 * context. In order to avoid inadvertent sharing
		 * of this ppgtt with that third context (instead of
		 * the one from ctx), we have to confirm that we have
		 * the same ppgtt after passing through the strong
		 * memory barrier implied by a successful
		 * kref_get_unless_zero().
		 *
		 * Once we have acquired the current ppgtt of ctx,
		 * we no longer care if it is released from ctx, as
		 * it cannot be reallocated elsewhere.
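		 *
		 * (This is the usual RCU lookup pattern: take a reference,
		 * then re-check that the pointer has not been reassigned
		 * underneath us, retrying until both agree.)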
197 */ 198 199 if (vm == rcu_access_pointer(ctx->vm)) 200 return rcu_pointer_handoff(vm); 201 202 i915_vm_put(vm); 203 } while (1); 204 } 205 206 static void intel_context_set_gem(struct intel_context *ce, 207 struct i915_gem_context *ctx) 208 { 209 GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); 210 RCU_INIT_POINTER(ce->gem_context, ctx); 211 212 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) 213 ce->ring = __intel_context_ring_size(SZ_16K); 214 215 if (rcu_access_pointer(ctx->vm)) { 216 struct i915_address_space *vm; 217 218 rcu_read_lock(); 219 vm = context_get_vm_rcu(ctx); /* hmm */ 220 rcu_read_unlock(); 221 222 i915_vm_put(ce->vm); 223 ce->vm = vm; 224 } 225 226 GEM_BUG_ON(ce->timeline); 227 if (ctx->timeline) 228 ce->timeline = intel_timeline_get(ctx->timeline); 229 230 if (ctx->sched.priority >= I915_PRIORITY_NORMAL && 231 intel_engine_has_timeslices(ce->engine)) 232 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); 233 } 234 235 static void __free_engines(struct i915_gem_engines *e, unsigned int count) 236 { 237 while (count--) { 238 if (!e->engines[count]) 239 continue; 240 241 intel_context_put(e->engines[count]); 242 } 243 kfree(e); 244 } 245 246 static void free_engines(struct i915_gem_engines *e) 247 { 248 __free_engines(e, e->num_engines); 249 } 250 251 static void free_engines_rcu(struct rcu_head *rcu) 252 { 253 struct i915_gem_engines *engines = 254 container_of(rcu, struct i915_gem_engines, rcu); 255 256 i915_sw_fence_fini(&engines->fence); 257 free_engines(engines); 258 } 259 260 static int __i915_sw_fence_call 261 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) 262 { 263 struct i915_gem_engines *engines = 264 container_of(fence, typeof(*engines), fence); 265 266 switch (state) { 267 case FENCE_COMPLETE: 268 if (!list_empty(&engines->link)) { 269 struct i915_gem_context *ctx = engines->ctx; 270 unsigned long flags; 271 272 spin_lock_irqsave(&ctx->stale.lock, flags); 273 list_del(&engines->link); 274 spin_unlock_irqrestore(&ctx->stale.lock, flags); 275 } 276 i915_gem_context_put(engines->ctx); 277 break; 278 279 case FENCE_FREE: 280 init_rcu_head(&engines->rcu); 281 call_rcu(&engines->rcu, free_engines_rcu); 282 break; 283 } 284 285 return NOTIFY_DONE; 286 } 287 288 static struct i915_gem_engines *alloc_engines(unsigned int count) 289 { 290 struct i915_gem_engines *e; 291 292 e = kzalloc(struct_size(e, engines, count), GFP_KERNEL); 293 if (!e) 294 return NULL; 295 296 i915_sw_fence_init(&e->fence, engines_notify); 297 return e; 298 } 299 300 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) 301 { 302 const struct intel_gt *gt = &ctx->i915->gt; 303 struct intel_engine_cs *engine; 304 struct i915_gem_engines *e; 305 enum intel_engine_id id; 306 307 e = alloc_engines(I915_NUM_ENGINES); 308 if (!e) 309 return ERR_PTR(-ENOMEM); 310 311 for_each_engine(engine, gt, id) { 312 struct intel_context *ce; 313 314 if (engine->legacy_idx == INVALID_ENGINE) 315 continue; 316 317 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES); 318 GEM_BUG_ON(e->engines[engine->legacy_idx]); 319 320 ce = intel_context_create(engine); 321 if (IS_ERR(ce)) { 322 __free_engines(e, e->num_engines + 1); 323 return ERR_CAST(ce); 324 } 325 326 intel_context_set_gem(ce, ctx); 327 328 e->engines[engine->legacy_idx] = ce; 329 e->num_engines = max(e->num_engines, engine->legacy_idx); 330 } 331 e->num_engines++; 332 333 return e; 334 } 335 336 static void i915_gem_context_free(struct i915_gem_context *ctx) 337 { 338 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 

	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	if (ctx->timeline)
		intel_timeline_put(ctx->timeline);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);

	kfree_rcu(ctx, rcu);
}

static void contexts_free_all(struct llist_node *list)
{
	struct i915_gem_context *ctx, *cn;

	llist_for_each_entry_safe(ctx, cn, list, free_link)
		i915_gem_context_free(ctx);
}

static void contexts_flush_free(struct i915_gem_contexts *gc)
{
	contexts_free_all(llist_del_all(&gc->free_list));
}

static void contexts_free_worker(struct work_struct *work)
{
	struct i915_gem_contexts *gc =
		container_of(work, typeof(*gc), free_work);

	contexts_flush_free(gc);
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;

	trace_i915_context_free(ctx);
	if (llist_add(&ctx->free_link, &gc->free_list))
		schedule_work(&gc->free_work);
}

static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines, true);
}

static bool __reset_engine(struct intel_engine_cs *engine)
{
	struct intel_gt *gt = engine->gt;
	bool success = false;

	if (!intel_has_reset_engine(gt))
		return false;

	if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
			      &gt->reset.flags)) {
		success = intel_engine_reset(engine, NULL) == 0;
		clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
				      &gt->reset.flags);
	}

	return success;
}

static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}

static bool __cancel_engine(struct intel_engine_cs *engine)
{
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset.) As we have marked our context
	 * as banned, any incomplete request, including any that are
	 * running, will be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fall back to doing a local reset
	 * instead.
	 */
	if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
	    !intel_engine_pulse(engine))
		return true;

	/* If we are unable to send a pulse, try resetting this engine. */
	return __reset_engine(engine);
}

static struct intel_engine_cs *__active_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine, *locked;

	/*
	 * Serialise with __i915_request_submit() so that it either sees
	 * that the context is banned, or we know the request is already
	 * inflight.
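	 *
	 * Note that rq->engine is not stable without a lock: a request on
	 * a virtual engine may be moved between siblings, so take the lock
	 * of the engine we sampled and re-check until the pointer settles.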
449 */ 450 locked = READ_ONCE(rq->engine); 451 spin_lock_irq(&locked->active.lock); 452 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { 453 spin_unlock(&locked->active.lock); 454 spin_lock(&engine->active.lock); 455 locked = engine; 456 } 457 458 engine = NULL; 459 if (i915_request_is_active(rq) && rq->fence.error != -EIO) 460 engine = rq->engine; 461 462 spin_unlock_irq(&locked->active.lock); 463 464 return engine; 465 } 466 467 static struct intel_engine_cs *active_engine(struct intel_context *ce) 468 { 469 struct intel_engine_cs *engine = NULL; 470 struct i915_request *rq; 471 472 if (!ce->timeline) 473 return NULL; 474 475 mutex_lock(&ce->timeline->mutex); 476 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { 477 if (i915_request_completed(rq)) 478 break; 479 480 /* Check with the backend if the request is inflight */ 481 engine = __active_engine(rq); 482 if (engine) 483 break; 484 } 485 mutex_unlock(&ce->timeline->mutex); 486 487 return engine; 488 } 489 490 static void kill_engines(struct i915_gem_engines *engines) 491 { 492 struct i915_gem_engines_iter it; 493 struct intel_context *ce; 494 495 /* 496 * Map the user's engine back to the actual engines; one virtual 497 * engine will be mapped to multiple engines, and using ctx->engine[] 498 * the same engine may be have multiple instances in the user's map. 499 * However, we only care about pending requests, so only include 500 * engines on which there are incomplete requests. 501 */ 502 for_each_gem_engine(ce, engines, it) { 503 struct intel_engine_cs *engine; 504 505 if (intel_context_set_banned(ce)) 506 continue; 507 508 /* 509 * Check the current active state of this context; if we 510 * are currently executing on the GPU we need to evict 511 * ourselves. On the other hand, if we haven't yet been 512 * submitted to the GPU or if everything is complete, 513 * we have nothing to do. 514 */ 515 engine = active_engine(ce); 516 517 /* First attempt to gracefully cancel the context */ 518 if (engine && !__cancel_engine(engine)) 519 /* 520 * If we are unable to send a preemptive pulse to bump 521 * the context from the GPU, we have to resort to a full 522 * reset. We hope the collateral damage is worth it. 
523 */ 524 __reset_context(engines->ctx, engine); 525 } 526 } 527 528 static void kill_stale_engines(struct i915_gem_context *ctx) 529 { 530 struct i915_gem_engines *pos, *next; 531 532 spin_lock_irq(&ctx->stale.lock); 533 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 534 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) { 535 if (!i915_sw_fence_await(&pos->fence)) { 536 list_del_init(&pos->link); 537 continue; 538 } 539 540 spin_unlock_irq(&ctx->stale.lock); 541 542 kill_engines(pos); 543 544 spin_lock_irq(&ctx->stale.lock); 545 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); 546 list_safe_reset_next(pos, next, link); 547 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */ 548 549 i915_sw_fence_complete(&pos->fence); 550 } 551 spin_unlock_irq(&ctx->stale.lock); 552 } 553 554 static void kill_context(struct i915_gem_context *ctx) 555 { 556 kill_stale_engines(ctx); 557 } 558 559 static void engines_idle_release(struct i915_gem_context *ctx, 560 struct i915_gem_engines *engines) 561 { 562 struct i915_gem_engines_iter it; 563 struct intel_context *ce; 564 565 INIT_LIST_HEAD(&engines->link); 566 567 engines->ctx = i915_gem_context_get(ctx); 568 569 for_each_gem_engine(ce, engines, it) { 570 int err; 571 572 /* serialises with execbuf */ 573 set_bit(CONTEXT_CLOSED_BIT, &ce->flags); 574 if (!intel_context_pin_if_active(ce)) 575 continue; 576 577 /* Wait until context is finally scheduled out and retired */ 578 err = i915_sw_fence_await_active(&engines->fence, 579 &ce->active, 580 I915_ACTIVE_AWAIT_BARRIER); 581 intel_context_unpin(ce); 582 if (err) 583 goto kill; 584 } 585 586 spin_lock_irq(&ctx->stale.lock); 587 if (!i915_gem_context_is_closed(ctx)) 588 list_add_tail(&engines->link, &ctx->stale.engines); 589 spin_unlock_irq(&ctx->stale.lock); 590 591 kill: 592 if (list_empty(&engines->link)) /* raced, already closed */ 593 kill_engines(engines); 594 595 i915_sw_fence_commit(&engines->fence); 596 } 597 598 static void set_closed_name(struct i915_gem_context *ctx) 599 { 600 char *s; 601 602 /* Replace '[]' with '<>' to indicate closed in debug prints */ 603 604 s = strrchr(ctx->name, '['); 605 if (!s) 606 return; 607 608 *s = '<'; 609 610 s = strchr(s + 1, ']'); 611 if (s) 612 *s = '>'; 613 } 614 615 static void context_close(struct i915_gem_context *ctx) 616 { 617 struct i915_address_space *vm; 618 619 /* Flush any concurrent set_engines() */ 620 mutex_lock(&ctx->engines_mutex); 621 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); 622 i915_gem_context_set_closed(ctx); 623 mutex_unlock(&ctx->engines_mutex); 624 625 mutex_lock(&ctx->mutex); 626 627 set_closed_name(ctx); 628 629 vm = i915_gem_context_vm(ctx); 630 if (vm) 631 i915_vm_close(vm); 632 633 ctx->file_priv = ERR_PTR(-EBADF); 634 635 /* 636 * The LUT uses the VMA as a backpointer to unref the object, 637 * so we need to clear the LUT before we close all the VMA (inside 638 * the ppgtt). 639 */ 640 lut_close(ctx); 641 642 mutex_unlock(&ctx->mutex); 643 644 /* 645 * If the user has disabled hangchecking, we can not be sure that 646 * the batches will ever complete after the context is closed, 647 * keeping the context and all resources pinned forever. So in this 648 * case we opt to forcibly kill off all remaining requests on 649 * context close. 
650 */ 651 if (!i915_gem_context_is_persistent(ctx) || 652 !ctx->i915->params.enable_hangcheck) 653 kill_context(ctx); 654 655 i915_gem_context_put(ctx); 656 } 657 658 static int __context_set_persistence(struct i915_gem_context *ctx, bool state) 659 { 660 if (i915_gem_context_is_persistent(ctx) == state) 661 return 0; 662 663 if (state) { 664 /* 665 * Only contexts that are short-lived [that will expire or be 666 * reset] are allowed to survive past termination. We require 667 * hangcheck to ensure that the persistent requests are healthy. 668 */ 669 if (!ctx->i915->params.enable_hangcheck) 670 return -EINVAL; 671 672 i915_gem_context_set_persistence(ctx); 673 } else { 674 /* To cancel a context we use "preempt-to-idle" */ 675 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) 676 return -ENODEV; 677 678 /* 679 * If the cancel fails, we then need to reset, cleanly! 680 * 681 * If the per-engine reset fails, all hope is lost! We resort 682 * to a full GPU reset in that unlikely case, but realistically 683 * if the engine could not reset, the full reset does not fare 684 * much better. The damage has been done. 685 * 686 * However, if we cannot reset an engine by itself, we cannot 687 * cleanup a hanging persistent context without causing 688 * colateral damage, and we should not pretend we can by 689 * exposing the interface. 690 */ 691 if (!intel_has_reset_engine(&ctx->i915->gt)) 692 return -ENODEV; 693 694 i915_gem_context_clear_persistence(ctx); 695 } 696 697 return 0; 698 } 699 700 static struct i915_gem_context * 701 __create_context(struct drm_i915_private *i915) 702 { 703 struct i915_gem_context *ctx; 704 struct i915_gem_engines *e; 705 int err; 706 int i; 707 708 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 709 if (!ctx) 710 return ERR_PTR(-ENOMEM); 711 712 kref_init(&ctx->ref); 713 ctx->i915 = i915; 714 ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); 715 mutex_init(&ctx->mutex); 716 717 spin_lock_init(&ctx->stale.lock); 718 INIT_LIST_HEAD(&ctx->stale.engines); 719 720 mutex_init(&ctx->engines_mutex); 721 e = default_engines(ctx); 722 if (IS_ERR(e)) { 723 err = PTR_ERR(e); 724 goto err_free; 725 } 726 RCU_INIT_POINTER(ctx->engines, e); 727 728 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); 729 mutex_init(&ctx->lut_mutex); 730 731 /* NB: Mark all slices as needing a remap so that when the context first 732 * loads it will restore whatever remap state already exists. If there 733 * is no remap info, it will be a NOP. */ 734 ctx->remap_slice = ALL_L3_SLICES(i915); 735 736 i915_gem_context_set_bannable(ctx); 737 i915_gem_context_set_recoverable(ctx); 738 __context_set_persistence(ctx, true /* cgroup hook? 
*/); 739 740 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) 741 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; 742 743 spin_lock(&i915->gem.contexts.lock); 744 list_add_tail(&ctx->link, &i915->gem.contexts.list); 745 spin_unlock(&i915->gem.contexts.lock); 746 747 return ctx; 748 749 err_free: 750 kfree(ctx); 751 return ERR_PTR(err); 752 } 753 754 static inline struct i915_gem_engines * 755 __context_engines_await(const struct i915_gem_context *ctx) 756 { 757 struct i915_gem_engines *engines; 758 759 rcu_read_lock(); 760 do { 761 engines = rcu_dereference(ctx->engines); 762 GEM_BUG_ON(!engines); 763 764 if (unlikely(!i915_sw_fence_await(&engines->fence))) 765 continue; 766 767 if (likely(engines == rcu_access_pointer(ctx->engines))) 768 break; 769 770 i915_sw_fence_complete(&engines->fence); 771 } while (1); 772 rcu_read_unlock(); 773 774 return engines; 775 } 776 777 static int 778 context_apply_all(struct i915_gem_context *ctx, 779 int (*fn)(struct intel_context *ce, void *data), 780 void *data) 781 { 782 struct i915_gem_engines_iter it; 783 struct i915_gem_engines *e; 784 struct intel_context *ce; 785 int err = 0; 786 787 e = __context_engines_await(ctx); 788 for_each_gem_engine(ce, e, it) { 789 err = fn(ce, data); 790 if (err) 791 break; 792 } 793 i915_sw_fence_complete(&e->fence); 794 795 return err; 796 } 797 798 static int __apply_ppgtt(struct intel_context *ce, void *vm) 799 { 800 i915_vm_put(ce->vm); 801 ce->vm = i915_vm_get(vm); 802 return 0; 803 } 804 805 static struct i915_address_space * 806 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) 807 { 808 struct i915_address_space *old; 809 810 old = rcu_replace_pointer(ctx->vm, 811 i915_vm_open(vm), 812 lockdep_is_held(&ctx->mutex)); 813 GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old)); 814 815 context_apply_all(ctx, __apply_ppgtt, vm); 816 817 return old; 818 } 819 820 static void __assign_ppgtt(struct i915_gem_context *ctx, 821 struct i915_address_space *vm) 822 { 823 if (vm == rcu_access_pointer(ctx->vm)) 824 return; 825 826 vm = __set_ppgtt(ctx, vm); 827 if (vm) 828 i915_vm_close(vm); 829 } 830 831 static void __set_timeline(struct intel_timeline **dst, 832 struct intel_timeline *src) 833 { 834 struct intel_timeline *old = *dst; 835 836 *dst = src ? 
intel_timeline_get(src) : NULL; 837 838 if (old) 839 intel_timeline_put(old); 840 } 841 842 static int __apply_timeline(struct intel_context *ce, void *timeline) 843 { 844 __set_timeline(&ce->timeline, timeline); 845 return 0; 846 } 847 848 static void __assign_timeline(struct i915_gem_context *ctx, 849 struct intel_timeline *timeline) 850 { 851 __set_timeline(&ctx->timeline, timeline); 852 context_apply_all(ctx, __apply_timeline, timeline); 853 } 854 855 static struct i915_gem_context * 856 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) 857 { 858 struct i915_gem_context *ctx; 859 860 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && 861 !HAS_EXECLISTS(i915)) 862 return ERR_PTR(-EINVAL); 863 864 /* Reap the stale contexts */ 865 contexts_flush_free(&i915->gem.contexts); 866 867 ctx = __create_context(i915); 868 if (IS_ERR(ctx)) 869 return ctx; 870 871 if (HAS_FULL_PPGTT(i915)) { 872 struct i915_ppgtt *ppgtt; 873 874 ppgtt = i915_ppgtt_create(&i915->gt); 875 if (IS_ERR(ppgtt)) { 876 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", 877 PTR_ERR(ppgtt)); 878 context_close(ctx); 879 return ERR_CAST(ppgtt); 880 } 881 882 mutex_lock(&ctx->mutex); 883 __assign_ppgtt(ctx, &ppgtt->vm); 884 mutex_unlock(&ctx->mutex); 885 886 i915_vm_put(&ppgtt->vm); 887 } 888 889 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { 890 struct intel_timeline *timeline; 891 892 timeline = intel_timeline_create(&i915->gt, NULL); 893 if (IS_ERR(timeline)) { 894 context_close(ctx); 895 return ERR_CAST(timeline); 896 } 897 898 __assign_timeline(ctx, timeline); 899 intel_timeline_put(timeline); 900 } 901 902 trace_i915_context_create(ctx); 903 904 return ctx; 905 } 906 907 static void init_contexts(struct i915_gem_contexts *gc) 908 { 909 spin_lock_init(&gc->lock); 910 INIT_LIST_HEAD(&gc->list); 911 912 INIT_WORK(&gc->free_work, contexts_free_worker); 913 init_llist_head(&gc->free_list); 914 } 915 916 void i915_gem_init__contexts(struct drm_i915_private *i915) 917 { 918 init_contexts(&i915->gem.contexts); 919 drm_dbg(&i915->drm, "%s context support initialized\n", 920 DRIVER_CAPS(i915)->has_logical_contexts ? 
921 "logical" : "fake"); 922 } 923 924 void i915_gem_driver_release__contexts(struct drm_i915_private *i915) 925 { 926 flush_work(&i915->gem.contexts.free_work); 927 rcu_barrier(); /* and flush the left over RCU frees */ 928 } 929 930 static int gem_context_register(struct i915_gem_context *ctx, 931 struct drm_i915_file_private *fpriv, 932 u32 *id) 933 { 934 struct i915_address_space *vm; 935 int ret; 936 937 ctx->file_priv = fpriv; 938 939 mutex_lock(&ctx->mutex); 940 vm = i915_gem_context_vm(ctx); 941 if (vm) 942 WRITE_ONCE(vm->file, fpriv); /* XXX */ 943 mutex_unlock(&ctx->mutex); 944 945 ctx->pid = get_task_pid(current, PIDTYPE_PID); 946 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 947 current->comm, pid_nr(ctx->pid)); 948 949 /* And finally expose ourselves to userspace via the idr */ 950 ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL); 951 if (ret) 952 put_pid(fetch_and_zero(&ctx->pid)); 953 954 return ret; 955 } 956 957 int i915_gem_context_open(struct drm_i915_private *i915, 958 struct drm_file *file) 959 { 960 struct drm_i915_file_private *file_priv = file->driver_priv; 961 struct i915_gem_context *ctx; 962 int err; 963 u32 id; 964 965 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); 966 967 /* 0 reserved for invalid/unassigned ppgtt */ 968 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); 969 970 ctx = i915_gem_create_context(i915, 0); 971 if (IS_ERR(ctx)) { 972 err = PTR_ERR(ctx); 973 goto err; 974 } 975 976 err = gem_context_register(ctx, file_priv, &id); 977 if (err < 0) 978 goto err_ctx; 979 980 GEM_BUG_ON(id); 981 return 0; 982 983 err_ctx: 984 context_close(ctx); 985 err: 986 xa_destroy(&file_priv->vm_xa); 987 xa_destroy(&file_priv->context_xa); 988 return err; 989 } 990 991 void i915_gem_context_close(struct drm_file *file) 992 { 993 struct drm_i915_file_private *file_priv = file->driver_priv; 994 struct drm_i915_private *i915 = file_priv->dev_priv; 995 struct i915_address_space *vm; 996 struct i915_gem_context *ctx; 997 unsigned long idx; 998 999 xa_for_each(&file_priv->context_xa, idx, ctx) 1000 context_close(ctx); 1001 xa_destroy(&file_priv->context_xa); 1002 1003 xa_for_each(&file_priv->vm_xa, idx, vm) 1004 i915_vm_put(vm); 1005 xa_destroy(&file_priv->vm_xa); 1006 1007 contexts_flush_free(&i915->gem.contexts); 1008 } 1009 1010 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, 1011 struct drm_file *file) 1012 { 1013 struct drm_i915_private *i915 = to_i915(dev); 1014 struct drm_i915_gem_vm_control *args = data; 1015 struct drm_i915_file_private *file_priv = file->driver_priv; 1016 struct i915_ppgtt *ppgtt; 1017 u32 id; 1018 int err; 1019 1020 if (!HAS_FULL_PPGTT(i915)) 1021 return -ENODEV; 1022 1023 if (args->flags) 1024 return -EINVAL; 1025 1026 ppgtt = i915_ppgtt_create(&i915->gt); 1027 if (IS_ERR(ppgtt)) 1028 return PTR_ERR(ppgtt); 1029 1030 ppgtt->vm.file = file_priv; 1031 1032 if (args->extensions) { 1033 err = i915_user_extensions(u64_to_user_ptr(args->extensions), 1034 NULL, 0, 1035 ppgtt); 1036 if (err) 1037 goto err_put; 1038 } 1039 1040 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, 1041 xa_limit_32b, GFP_KERNEL); 1042 if (err) 1043 goto err_put; 1044 1045 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 1046 args->vm_id = id; 1047 return 0; 1048 1049 err_put: 1050 i915_vm_put(&ppgtt->vm); 1051 return err; 1052 } 1053 1054 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, 1055 struct drm_file *file) 1056 { 1057 struct drm_i915_file_private *file_priv = file->driver_priv; 1058 
struct drm_i915_gem_vm_control *args = data; 1059 struct i915_address_space *vm; 1060 1061 if (args->flags) 1062 return -EINVAL; 1063 1064 if (args->extensions) 1065 return -EINVAL; 1066 1067 vm = xa_erase(&file_priv->vm_xa, args->vm_id); 1068 if (!vm) 1069 return -ENOENT; 1070 1071 i915_vm_put(vm); 1072 return 0; 1073 } 1074 1075 struct context_barrier_task { 1076 struct i915_active base; 1077 void (*task)(void *data); 1078 void *data; 1079 }; 1080 1081 __i915_active_call 1082 static void cb_retire(struct i915_active *base) 1083 { 1084 struct context_barrier_task *cb = container_of(base, typeof(*cb), base); 1085 1086 if (cb->task) 1087 cb->task(cb->data); 1088 1089 i915_active_fini(&cb->base); 1090 kfree(cb); 1091 } 1092 1093 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); 1094 static int context_barrier_task(struct i915_gem_context *ctx, 1095 intel_engine_mask_t engines, 1096 bool (*skip)(struct intel_context *ce, void *data), 1097 int (*emit)(struct i915_request *rq, void *data), 1098 void (*task)(void *data), 1099 void *data) 1100 { 1101 struct context_barrier_task *cb; 1102 struct i915_gem_engines_iter it; 1103 struct i915_gem_engines *e; 1104 struct intel_context *ce; 1105 int err = 0; 1106 1107 GEM_BUG_ON(!task); 1108 1109 cb = kmalloc(sizeof(*cb), GFP_KERNEL); 1110 if (!cb) 1111 return -ENOMEM; 1112 1113 i915_active_init(&cb->base, NULL, cb_retire); 1114 err = i915_active_acquire(&cb->base); 1115 if (err) { 1116 kfree(cb); 1117 return err; 1118 } 1119 1120 e = __context_engines_await(ctx); 1121 if (!e) { 1122 i915_active_release(&cb->base); 1123 return -ENOENT; 1124 } 1125 1126 for_each_gem_engine(ce, e, it) { 1127 struct i915_request *rq; 1128 1129 if (I915_SELFTEST_ONLY(context_barrier_inject_fault & 1130 ce->engine->mask)) { 1131 err = -ENXIO; 1132 break; 1133 } 1134 1135 if (!(ce->engine->mask & engines)) 1136 continue; 1137 1138 if (skip && skip(ce, data)) 1139 continue; 1140 1141 rq = intel_context_create_request(ce); 1142 if (IS_ERR(rq)) { 1143 err = PTR_ERR(rq); 1144 break; 1145 } 1146 1147 err = 0; 1148 if (emit) 1149 err = emit(rq, data); 1150 if (err == 0) 1151 err = i915_active_add_request(&cb->base, rq); 1152 1153 i915_request_add(rq); 1154 if (err) 1155 break; 1156 } 1157 i915_sw_fence_complete(&e->fence); 1158 1159 cb->task = err ? 
NULL : task; /* caller needs to unwind instead */ 1160 cb->data = data; 1161 1162 i915_active_release(&cb->base); 1163 1164 return err; 1165 } 1166 1167 static int get_ppgtt(struct drm_i915_file_private *file_priv, 1168 struct i915_gem_context *ctx, 1169 struct drm_i915_gem_context_param *args) 1170 { 1171 struct i915_address_space *vm; 1172 int err; 1173 u32 id; 1174 1175 if (!rcu_access_pointer(ctx->vm)) 1176 return -ENODEV; 1177 1178 rcu_read_lock(); 1179 vm = context_get_vm_rcu(ctx); 1180 rcu_read_unlock(); 1181 if (!vm) 1182 return -ENODEV; 1183 1184 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); 1185 if (err) 1186 goto err_put; 1187 1188 i915_vm_open(vm); 1189 1190 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 1191 args->value = id; 1192 args->size = 0; 1193 1194 err_put: 1195 i915_vm_put(vm); 1196 return err; 1197 } 1198 1199 static void set_ppgtt_barrier(void *data) 1200 { 1201 struct i915_address_space *old = data; 1202 1203 if (INTEL_GEN(old->i915) < 8) 1204 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); 1205 1206 i915_vm_close(old); 1207 } 1208 1209 static int emit_ppgtt_update(struct i915_request *rq, void *data) 1210 { 1211 struct i915_address_space *vm = rq->context->vm; 1212 struct intel_engine_cs *engine = rq->engine; 1213 u32 base = engine->mmio_base; 1214 u32 *cs; 1215 int i; 1216 1217 if (i915_vm_is_4lvl(vm)) { 1218 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1219 const dma_addr_t pd_daddr = px_dma(ppgtt->pd); 1220 1221 cs = intel_ring_begin(rq, 6); 1222 if (IS_ERR(cs)) 1223 return PTR_ERR(cs); 1224 1225 *cs++ = MI_LOAD_REGISTER_IMM(2); 1226 1227 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); 1228 *cs++ = upper_32_bits(pd_daddr); 1229 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); 1230 *cs++ = lower_32_bits(pd_daddr); 1231 1232 *cs++ = MI_NOOP; 1233 intel_ring_advance(rq, cs); 1234 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { 1235 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1236 int err; 1237 1238 /* Magic required to prevent forcewake errors! 
*/ 1239 err = engine->emit_flush(rq, EMIT_INVALIDATE); 1240 if (err) 1241 return err; 1242 1243 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); 1244 if (IS_ERR(cs)) 1245 return PTR_ERR(cs); 1246 1247 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; 1248 for (i = GEN8_3LVL_PDPES; i--; ) { 1249 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 1250 1251 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); 1252 *cs++ = upper_32_bits(pd_daddr); 1253 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); 1254 *cs++ = lower_32_bits(pd_daddr); 1255 } 1256 *cs++ = MI_NOOP; 1257 intel_ring_advance(rq, cs); 1258 } 1259 1260 return 0; 1261 } 1262 1263 static bool skip_ppgtt_update(struct intel_context *ce, void *data) 1264 { 1265 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) 1266 return true; 1267 1268 if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) 1269 return false; 1270 1271 if (!atomic_read(&ce->pin_count)) 1272 return true; 1273 1274 /* ppGTT is not part of the legacy context image */ 1275 if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm))) 1276 return true; 1277 1278 return false; 1279 } 1280 1281 static int set_ppgtt(struct drm_i915_file_private *file_priv, 1282 struct i915_gem_context *ctx, 1283 struct drm_i915_gem_context_param *args) 1284 { 1285 struct i915_address_space *vm, *old; 1286 int err; 1287 1288 if (args->size) 1289 return -EINVAL; 1290 1291 if (!rcu_access_pointer(ctx->vm)) 1292 return -ENODEV; 1293 1294 if (upper_32_bits(args->value)) 1295 return -ENOENT; 1296 1297 rcu_read_lock(); 1298 vm = xa_load(&file_priv->vm_xa, args->value); 1299 if (vm && !kref_get_unless_zero(&vm->ref)) 1300 vm = NULL; 1301 rcu_read_unlock(); 1302 if (!vm) 1303 return -ENOENT; 1304 1305 err = mutex_lock_interruptible(&ctx->mutex); 1306 if (err) 1307 goto out; 1308 1309 if (i915_gem_context_is_closed(ctx)) { 1310 err = -ENOENT; 1311 goto unlock; 1312 } 1313 1314 if (vm == rcu_access_pointer(ctx->vm)) 1315 goto unlock; 1316 1317 old = __set_ppgtt(ctx, vm); 1318 1319 /* Teardown the existing obj:vma cache, it will have to be rebuilt. */ 1320 lut_close(ctx); 1321 1322 /* 1323 * We need to flush any requests using the current ppgtt before 1324 * we release it as the requests do not hold a reference themselves, 1325 * only indirectly through the context. 
	 */
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_ppgtt_update,
				   emit_ppgtt_update,
				   set_ppgtt_barrier,
				   old);
	if (err) {
		i915_vm_close(__set_ppgtt(ctx, old));
		i915_vm_close(old);
		lut_close(ctx); /* force a rebuild of the old obj:vma cache */
	}

unlock:
	mutex_unlock(&ctx->mutex);
out:
	i915_vm_put(vm);
	return err;
}

static int __apply_ringsize(struct intel_context *ce, void *sz)
{
	return intel_context_set_ring_size(ce, (unsigned long)sz);
}

static int set_ringsize(struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
		return -ENODEV;

	if (args->size)
		return -EINVAL;

	if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
		return -EINVAL;

	if (args->value < I915_GTT_PAGE_SIZE)
		return -EINVAL;

	if (args->value > 128 * I915_GTT_PAGE_SIZE)
		return -EINVAL;

	return context_apply_all(ctx,
				 __apply_ringsize,
				 __intel_context_ring_size(args->value));
}

static int __get_ringsize(struct intel_context *ce, void *arg)
{
	long sz;

	sz = intel_context_get_ring_size(ce);
	GEM_BUG_ON(sz > INT_MAX);

	return sz; /* stop on first engine */
}

static int get_ringsize(struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int sz;

	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
		return -ENODEV;

	if (args->size)
		return -EINVAL;

	sz = context_apply_all(ctx, __get_ringsize, NULL);
	if (sz < 0)
		return sz;

	args->value = sz;
	return 0;
}

int
i915_gem_user_to_context_sseu(struct intel_gt *gt,
			      const struct drm_i915_gem_context_param_sseu *user,
			      struct intel_sseu *context)
{
	const struct sseu_dev_info *device = &gt->info.sseu;
	struct drm_i915_private *i915 = gt->i915;

	/* No zeros in any field. */
	if (!user->slice_mask || !user->subslice_mask ||
	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
		return -EINVAL;

	/* Max >= min. */
	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
		return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
	 */
	if (overflows_type(user->slice_mask, context->slice_mask) ||
	    overflows_type(user->subslice_mask, context->subslice_mask) ||
	    overflows_type(user->min_eus_per_subslice,
			   context->min_eus_per_subslice) ||
	    overflows_type(user->max_eus_per_subslice,
			   context->max_eus_per_subslice))
		return -EINVAL;

	/* Check validity against hardware. */
	if (user->slice_mask & ~device->slice_mask)
		return -EINVAL;

	if (user->subslice_mask & ~device->subslice_mask[0])
		return -EINVAL;

	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
		return -EINVAL;

	context->slice_mask = user->slice_mask;
	context->subslice_mask = user->subslice_mask;
	context->min_eus_per_subslice = user->min_eus_per_subslice;
	context->max_eus_per_subslice = user->max_eus_per_subslice;

	/* Part specific restrictions.
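	 * (Currently only Gen11 adds extra constraints; they encode the
	 * VME use case this uAPI was introduced for.)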
*/ 1447 if (IS_GEN(i915, 11)) { 1448 unsigned int hw_s = hweight8(device->slice_mask); 1449 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); 1450 unsigned int req_s = hweight8(context->slice_mask); 1451 unsigned int req_ss = hweight8(context->subslice_mask); 1452 1453 /* 1454 * Only full subslice enablement is possible if more than one 1455 * slice is turned on. 1456 */ 1457 if (req_s > 1 && req_ss != hw_ss_per_s) 1458 return -EINVAL; 1459 1460 /* 1461 * If more than four (SScount bitfield limit) subslices are 1462 * requested then the number has to be even. 1463 */ 1464 if (req_ss > 4 && (req_ss & 1)) 1465 return -EINVAL; 1466 1467 /* 1468 * If only one slice is enabled and subslice count is below the 1469 * device full enablement, it must be at most half of the all 1470 * available subslices. 1471 */ 1472 if (req_s == 1 && req_ss < hw_ss_per_s && 1473 req_ss > (hw_ss_per_s / 2)) 1474 return -EINVAL; 1475 1476 /* ABI restriction - VME use case only. */ 1477 1478 /* All slices or one slice only. */ 1479 if (req_s != 1 && req_s != hw_s) 1480 return -EINVAL; 1481 1482 /* 1483 * Half subslices or full enablement only when one slice is 1484 * enabled. 1485 */ 1486 if (req_s == 1 && 1487 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) 1488 return -EINVAL; 1489 1490 /* No EU configuration changes. */ 1491 if ((user->min_eus_per_subslice != 1492 device->max_eus_per_subslice) || 1493 (user->max_eus_per_subslice != 1494 device->max_eus_per_subslice)) 1495 return -EINVAL; 1496 } 1497 1498 return 0; 1499 } 1500 1501 static int set_sseu(struct i915_gem_context *ctx, 1502 struct drm_i915_gem_context_param *args) 1503 { 1504 struct drm_i915_private *i915 = ctx->i915; 1505 struct drm_i915_gem_context_param_sseu user_sseu; 1506 struct intel_context *ce; 1507 struct intel_sseu sseu; 1508 unsigned long lookup; 1509 int ret; 1510 1511 if (args->size < sizeof(user_sseu)) 1512 return -EINVAL; 1513 1514 if (!IS_GEN(i915, 11)) 1515 return -ENODEV; 1516 1517 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 1518 sizeof(user_sseu))) 1519 return -EFAULT; 1520 1521 if (user_sseu.rsvd) 1522 return -EINVAL; 1523 1524 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 1525 return -EINVAL; 1526 1527 lookup = 0; 1528 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 1529 lookup |= LOOKUP_USER_INDEX; 1530 1531 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 1532 if (IS_ERR(ce)) 1533 return PTR_ERR(ce); 1534 1535 /* Only render engine supports RPCS configuration. 
*/ 1536 if (ce->engine->class != RENDER_CLASS) { 1537 ret = -ENODEV; 1538 goto out_ce; 1539 } 1540 1541 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu); 1542 if (ret) 1543 goto out_ce; 1544 1545 ret = intel_context_reconfigure_sseu(ce, sseu); 1546 if (ret) 1547 goto out_ce; 1548 1549 args->size = sizeof(user_sseu); 1550 1551 out_ce: 1552 intel_context_put(ce); 1553 return ret; 1554 } 1555 1556 struct set_engines { 1557 struct i915_gem_context *ctx; 1558 struct i915_gem_engines *engines; 1559 }; 1560 1561 static int 1562 set_engines__load_balance(struct i915_user_extension __user *base, void *data) 1563 { 1564 struct i915_context_engines_load_balance __user *ext = 1565 container_of_user(base, typeof(*ext), base); 1566 const struct set_engines *set = data; 1567 struct drm_i915_private *i915 = set->ctx->i915; 1568 struct intel_engine_cs *stack[16]; 1569 struct intel_engine_cs **siblings; 1570 struct intel_context *ce; 1571 u16 num_siblings, idx; 1572 unsigned int n; 1573 int err; 1574 1575 if (!HAS_EXECLISTS(i915)) 1576 return -ENODEV; 1577 1578 if (intel_uc_uses_guc_submission(&i915->gt.uc)) 1579 return -ENODEV; /* not implement yet */ 1580 1581 if (get_user(idx, &ext->engine_index)) 1582 return -EFAULT; 1583 1584 if (idx >= set->engines->num_engines) { 1585 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", 1586 idx, set->engines->num_engines); 1587 return -EINVAL; 1588 } 1589 1590 idx = array_index_nospec(idx, set->engines->num_engines); 1591 if (set->engines->engines[idx]) { 1592 drm_dbg(&i915->drm, 1593 "Invalid placement[%d], already occupied\n", idx); 1594 return -EEXIST; 1595 } 1596 1597 if (get_user(num_siblings, &ext->num_siblings)) 1598 return -EFAULT; 1599 1600 err = check_user_mbz(&ext->flags); 1601 if (err) 1602 return err; 1603 1604 err = check_user_mbz(&ext->mbz64); 1605 if (err) 1606 return err; 1607 1608 siblings = stack; 1609 if (num_siblings > ARRAY_SIZE(stack)) { 1610 siblings = kmalloc_array(num_siblings, 1611 sizeof(*siblings), 1612 GFP_KERNEL); 1613 if (!siblings) 1614 return -ENOMEM; 1615 } 1616 1617 for (n = 0; n < num_siblings; n++) { 1618 struct i915_engine_class_instance ci; 1619 1620 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { 1621 err = -EFAULT; 1622 goto out_siblings; 1623 } 1624 1625 siblings[n] = intel_engine_lookup_user(i915, 1626 ci.engine_class, 1627 ci.engine_instance); 1628 if (!siblings[n]) { 1629 drm_dbg(&i915->drm, 1630 "Invalid sibling[%d]: { class:%d, inst:%d }\n", 1631 n, ci.engine_class, ci.engine_instance); 1632 err = -EINVAL; 1633 goto out_siblings; 1634 } 1635 } 1636 1637 ce = intel_execlists_create_virtual(siblings, n); 1638 if (IS_ERR(ce)) { 1639 err = PTR_ERR(ce); 1640 goto out_siblings; 1641 } 1642 1643 intel_context_set_gem(ce, set->ctx); 1644 1645 if (cmpxchg(&set->engines->engines[idx], NULL, ce)) { 1646 intel_context_put(ce); 1647 err = -EEXIST; 1648 goto out_siblings; 1649 } 1650 1651 out_siblings: 1652 if (siblings != stack) 1653 kfree(siblings); 1654 1655 return err; 1656 } 1657 1658 static int 1659 set_engines__bond(struct i915_user_extension __user *base, void *data) 1660 { 1661 struct i915_context_engines_bond __user *ext = 1662 container_of_user(base, typeof(*ext), base); 1663 const struct set_engines *set = data; 1664 struct drm_i915_private *i915 = set->ctx->i915; 1665 struct i915_engine_class_instance ci; 1666 struct intel_engine_cs *virtual; 1667 struct intel_engine_cs *master; 1668 u16 idx, num_bonds; 1669 int err, n; 1670 1671 if (get_user(idx, &ext->virtual_index)) 1672 return 
-EFAULT; 1673 1674 if (idx >= set->engines->num_engines) { 1675 drm_dbg(&i915->drm, 1676 "Invalid index for virtual engine: %d >= %d\n", 1677 idx, set->engines->num_engines); 1678 return -EINVAL; 1679 } 1680 1681 idx = array_index_nospec(idx, set->engines->num_engines); 1682 if (!set->engines->engines[idx]) { 1683 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx); 1684 return -EINVAL; 1685 } 1686 virtual = set->engines->engines[idx]->engine; 1687 1688 err = check_user_mbz(&ext->flags); 1689 if (err) 1690 return err; 1691 1692 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { 1693 err = check_user_mbz(&ext->mbz64[n]); 1694 if (err) 1695 return err; 1696 } 1697 1698 if (copy_from_user(&ci, &ext->master, sizeof(ci))) 1699 return -EFAULT; 1700 1701 master = intel_engine_lookup_user(i915, 1702 ci.engine_class, ci.engine_instance); 1703 if (!master) { 1704 drm_dbg(&i915->drm, 1705 "Unrecognised master engine: { class:%u, instance:%u }\n", 1706 ci.engine_class, ci.engine_instance); 1707 return -EINVAL; 1708 } 1709 1710 if (get_user(num_bonds, &ext->num_bonds)) 1711 return -EFAULT; 1712 1713 for (n = 0; n < num_bonds; n++) { 1714 struct intel_engine_cs *bond; 1715 1716 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) 1717 return -EFAULT; 1718 1719 bond = intel_engine_lookup_user(i915, 1720 ci.engine_class, 1721 ci.engine_instance); 1722 if (!bond) { 1723 drm_dbg(&i915->drm, 1724 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", 1725 n, ci.engine_class, ci.engine_instance); 1726 return -EINVAL; 1727 } 1728 1729 /* 1730 * A non-virtual engine has no siblings to choose between; and 1731 * a submit fence will always be directed to the one engine. 1732 */ 1733 if (intel_engine_is_virtual(virtual)) { 1734 err = intel_virtual_engine_attach_bond(virtual, 1735 master, 1736 bond); 1737 if (err) 1738 return err; 1739 } 1740 } 1741 1742 return 0; 1743 } 1744 1745 static const i915_user_extension_fn set_engines__extensions[] = { 1746 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance, 1747 [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond, 1748 }; 1749 1750 static int 1751 set_engines(struct i915_gem_context *ctx, 1752 const struct drm_i915_gem_context_param *args) 1753 { 1754 struct drm_i915_private *i915 = ctx->i915; 1755 struct i915_context_param_engines __user *user = 1756 u64_to_user_ptr(args->value); 1757 struct set_engines set = { .ctx = ctx }; 1758 unsigned int num_engines, n; 1759 u64 extensions; 1760 int err; 1761 1762 if (!args->size) { /* switch back to legacy user_ring_map */ 1763 if (!i915_gem_context_user_engines(ctx)) 1764 return 0; 1765 1766 set.engines = default_engines(ctx); 1767 if (IS_ERR(set.engines)) 1768 return PTR_ERR(set.engines); 1769 1770 goto replace; 1771 } 1772 1773 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines))); 1774 if (args->size < sizeof(*user) || 1775 !IS_ALIGNED(args->size, sizeof(*user->engines))) { 1776 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n", 1777 args->size); 1778 return -EINVAL; 1779 } 1780 1781 /* 1782 * Note that I915_EXEC_RING_MASK limits execbuf to only using the 1783 * first 64 engines defined here. 
1784 */ 1785 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); 1786 set.engines = alloc_engines(num_engines); 1787 if (!set.engines) 1788 return -ENOMEM; 1789 1790 for (n = 0; n < num_engines; n++) { 1791 struct i915_engine_class_instance ci; 1792 struct intel_engine_cs *engine; 1793 struct intel_context *ce; 1794 1795 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { 1796 __free_engines(set.engines, n); 1797 return -EFAULT; 1798 } 1799 1800 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && 1801 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) { 1802 set.engines->engines[n] = NULL; 1803 continue; 1804 } 1805 1806 engine = intel_engine_lookup_user(ctx->i915, 1807 ci.engine_class, 1808 ci.engine_instance); 1809 if (!engine) { 1810 drm_dbg(&i915->drm, 1811 "Invalid engine[%d]: { class:%d, instance:%d }\n", 1812 n, ci.engine_class, ci.engine_instance); 1813 __free_engines(set.engines, n); 1814 return -ENOENT; 1815 } 1816 1817 ce = intel_context_create(engine); 1818 if (IS_ERR(ce)) { 1819 __free_engines(set.engines, n); 1820 return PTR_ERR(ce); 1821 } 1822 1823 intel_context_set_gem(ce, ctx); 1824 1825 set.engines->engines[n] = ce; 1826 } 1827 set.engines->num_engines = num_engines; 1828 1829 err = -EFAULT; 1830 if (!get_user(extensions, &user->extensions)) 1831 err = i915_user_extensions(u64_to_user_ptr(extensions), 1832 set_engines__extensions, 1833 ARRAY_SIZE(set_engines__extensions), 1834 &set); 1835 if (err) { 1836 free_engines(set.engines); 1837 return err; 1838 } 1839 1840 replace: 1841 mutex_lock(&ctx->engines_mutex); 1842 if (i915_gem_context_is_closed(ctx)) { 1843 mutex_unlock(&ctx->engines_mutex); 1844 free_engines(set.engines); 1845 return -ENOENT; 1846 } 1847 if (args->size) 1848 i915_gem_context_set_user_engines(ctx); 1849 else 1850 i915_gem_context_clear_user_engines(ctx); 1851 set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1); 1852 mutex_unlock(&ctx->engines_mutex); 1853 1854 /* Keep track of old engine sets for kill_context() */ 1855 engines_idle_release(ctx, set.engines); 1856 1857 return 0; 1858 } 1859 1860 static struct i915_gem_engines * 1861 __copy_engines(struct i915_gem_engines *e) 1862 { 1863 struct i915_gem_engines *copy; 1864 unsigned int n; 1865 1866 copy = alloc_engines(e->num_engines); 1867 if (!copy) 1868 return ERR_PTR(-ENOMEM); 1869 1870 for (n = 0; n < e->num_engines; n++) { 1871 if (e->engines[n]) 1872 copy->engines[n] = intel_context_get(e->engines[n]); 1873 else 1874 copy->engines[n] = NULL; 1875 } 1876 copy->num_engines = n; 1877 1878 return copy; 1879 } 1880 1881 static int 1882 get_engines(struct i915_gem_context *ctx, 1883 struct drm_i915_gem_context_param *args) 1884 { 1885 struct i915_context_param_engines __user *user; 1886 struct i915_gem_engines *e; 1887 size_t n, count, size; 1888 int err = 0; 1889 1890 err = mutex_lock_interruptible(&ctx->engines_mutex); 1891 if (err) 1892 return err; 1893 1894 e = NULL; 1895 if (i915_gem_context_user_engines(ctx)) 1896 e = __copy_engines(i915_gem_context_engines(ctx)); 1897 mutex_unlock(&ctx->engines_mutex); 1898 if (IS_ERR_OR_NULL(e)) { 1899 args->size = 0; 1900 return PTR_ERR_OR_ZERO(e); 1901 } 1902 1903 count = e->num_engines; 1904 1905 /* Be paranoid in case we have an impedance mismatch */ 1906 if (!check_struct_size(user, engines, count, &size)) { 1907 err = -EINVAL; 1908 goto err_free; 1909 } 1910 if (overflows_type(size, args->size)) { 1911 err = -EINVAL; 1912 goto err_free; 1913 } 1914 1915 if (!args->size) { 1916 args->size = size; 1917 goto 
err_free; 1918 } 1919 1920 if (args->size < size) { 1921 err = -EINVAL; 1922 goto err_free; 1923 } 1924 1925 user = u64_to_user_ptr(args->value); 1926 if (put_user(0, &user->extensions)) { 1927 err = -EFAULT; 1928 goto err_free; 1929 } 1930 1931 for (n = 0; n < count; n++) { 1932 struct i915_engine_class_instance ci = { 1933 .engine_class = I915_ENGINE_CLASS_INVALID, 1934 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE, 1935 }; 1936 1937 if (e->engines[n]) { 1938 ci.engine_class = e->engines[n]->engine->uabi_class; 1939 ci.engine_instance = e->engines[n]->engine->uabi_instance; 1940 } 1941 1942 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { 1943 err = -EFAULT; 1944 goto err_free; 1945 } 1946 } 1947 1948 args->size = size; 1949 1950 err_free: 1951 free_engines(e); 1952 return err; 1953 } 1954 1955 static int 1956 set_persistence(struct i915_gem_context *ctx, 1957 const struct drm_i915_gem_context_param *args) 1958 { 1959 if (args->size) 1960 return -EINVAL; 1961 1962 return __context_set_persistence(ctx, args->value); 1963 } 1964 1965 static int __apply_priority(struct intel_context *ce, void *arg) 1966 { 1967 struct i915_gem_context *ctx = arg; 1968 1969 if (!intel_engine_has_timeslices(ce->engine)) 1970 return 0; 1971 1972 if (ctx->sched.priority >= I915_PRIORITY_NORMAL) 1973 intel_context_set_use_semaphores(ce); 1974 else 1975 intel_context_clear_use_semaphores(ce); 1976 1977 return 0; 1978 } 1979 1980 static int set_priority(struct i915_gem_context *ctx, 1981 const struct drm_i915_gem_context_param *args) 1982 { 1983 s64 priority = args->value; 1984 1985 if (args->size) 1986 return -EINVAL; 1987 1988 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) 1989 return -ENODEV; 1990 1991 if (priority > I915_CONTEXT_MAX_USER_PRIORITY || 1992 priority < I915_CONTEXT_MIN_USER_PRIORITY) 1993 return -EINVAL; 1994 1995 if (priority > I915_CONTEXT_DEFAULT_PRIORITY && 1996 !capable(CAP_SYS_NICE)) 1997 return -EPERM; 1998 1999 ctx->sched.priority = I915_USER_PRIORITY(priority); 2000 context_apply_all(ctx, __apply_priority, ctx); 2001 2002 return 0; 2003 } 2004 2005 static int ctx_setparam(struct drm_i915_file_private *fpriv, 2006 struct i915_gem_context *ctx, 2007 struct drm_i915_gem_context_param *args) 2008 { 2009 int ret = 0; 2010 2011 switch (args->param) { 2012 case I915_CONTEXT_PARAM_NO_ZEROMAP: 2013 if (args->size) 2014 ret = -EINVAL; 2015 else if (args->value) 2016 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 2017 else 2018 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 2019 break; 2020 2021 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 2022 if (args->size) 2023 ret = -EINVAL; 2024 else if (args->value) 2025 i915_gem_context_set_no_error_capture(ctx); 2026 else 2027 i915_gem_context_clear_no_error_capture(ctx); 2028 break; 2029 2030 case I915_CONTEXT_PARAM_BANNABLE: 2031 if (args->size) 2032 ret = -EINVAL; 2033 else if (!capable(CAP_SYS_ADMIN) && !args->value) 2034 ret = -EPERM; 2035 else if (args->value) 2036 i915_gem_context_set_bannable(ctx); 2037 else 2038 i915_gem_context_clear_bannable(ctx); 2039 break; 2040 2041 case I915_CONTEXT_PARAM_RECOVERABLE: 2042 if (args->size) 2043 ret = -EINVAL; 2044 else if (args->value) 2045 i915_gem_context_set_recoverable(ctx); 2046 else 2047 i915_gem_context_clear_recoverable(ctx); 2048 break; 2049 2050 case I915_CONTEXT_PARAM_PRIORITY: 2051 ret = set_priority(ctx, args); 2052 break; 2053 2054 case I915_CONTEXT_PARAM_SSEU: 2055 ret = set_sseu(ctx, args); 2056 break; 2057 2058 case I915_CONTEXT_PARAM_VM: 2059 ret = set_ppgtt(fpriv, 
ctx, args); 2060 break; 2061 2062 case I915_CONTEXT_PARAM_ENGINES: 2063 ret = set_engines(ctx, args); 2064 break; 2065 2066 case I915_CONTEXT_PARAM_PERSISTENCE: 2067 ret = set_persistence(ctx, args); 2068 break; 2069 2070 case I915_CONTEXT_PARAM_RINGSIZE: 2071 ret = set_ringsize(ctx, args); 2072 break; 2073 2074 case I915_CONTEXT_PARAM_BAN_PERIOD: 2075 default: 2076 ret = -EINVAL; 2077 break; 2078 } 2079 2080 return ret; 2081 } 2082 2083 struct create_ext { 2084 struct i915_gem_context *ctx; 2085 struct drm_i915_file_private *fpriv; 2086 }; 2087 2088 static int create_setparam(struct i915_user_extension __user *ext, void *data) 2089 { 2090 struct drm_i915_gem_context_create_ext_setparam local; 2091 const struct create_ext *arg = data; 2092 2093 if (copy_from_user(&local, ext, sizeof(local))) 2094 return -EFAULT; 2095 2096 if (local.param.ctx_id) 2097 return -EINVAL; 2098 2099 return ctx_setparam(arg->fpriv, arg->ctx, &local.param); 2100 } 2101 2102 static int copy_ring_size(struct intel_context *dst, 2103 struct intel_context *src) 2104 { 2105 long sz; 2106 2107 sz = intel_context_get_ring_size(src); 2108 if (sz < 0) 2109 return sz; 2110 2111 return intel_context_set_ring_size(dst, sz); 2112 } 2113 2114 static int clone_engines(struct i915_gem_context *dst, 2115 struct i915_gem_context *src) 2116 { 2117 struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 2118 struct i915_gem_engines *clone; 2119 bool user_engines; 2120 unsigned long n; 2121 2122 clone = alloc_engines(e->num_engines); 2123 if (!clone) 2124 goto err_unlock; 2125 2126 for (n = 0; n < e->num_engines; n++) { 2127 struct intel_engine_cs *engine; 2128 2129 if (!e->engines[n]) { 2130 clone->engines[n] = NULL; 2131 continue; 2132 } 2133 engine = e->engines[n]->engine; 2134 2135 /* 2136 * Virtual engines are singletons; they can only exist 2137 * inside a single context, because they embed their 2138 * HW context... As each virtual context implies a single 2139 * timeline (each engine can only dequeue a single request 2140 * at any time), it would be surprising for two contexts 2141 * to use the same engine. So let's create a copy of 2142 * the virtual engine instead. 

static int copy_ring_size(struct intel_context *dst,
			  struct intel_context *src)
{
	long sz;

	sz = intel_context_get_ring_size(src);
	if (sz < 0)
		return sz;

	return intel_context_set_ring_size(dst, sz);
}

static int clone_engines(struct i915_gem_context *dst,
			 struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	bool user_engines;
	unsigned long n;

	clone = alloc_engines(e->num_engines);
	if (!clone)
		goto err_unlock;

	for (n = 0; n < e->num_engines; n++) {
		struct intel_engine_cs *engine;

		if (!e->engines[n]) {
			clone->engines[n] = NULL;
			continue;
		}
		engine = e->engines[n]->engine;

		/*
		 * Virtual engines are singletons; they can only exist
		 * inside a single context, because they embed their
		 * HW context... As each virtual context implies a single
		 * timeline (each engine can only dequeue a single request
		 * at any time), it would be surprising for two contexts
		 * to use the same engine. So let's create a copy of
		 * the virtual engine instead.
		 */
		if (intel_engine_is_virtual(engine))
			clone->engines[n] =
				intel_execlists_clone_virtual(engine);
		else
			clone->engines[n] = intel_context_create(engine);
		if (IS_ERR_OR_NULL(clone->engines[n])) {
			__free_engines(clone, n);
			goto err_unlock;
		}

		intel_context_set_gem(clone->engines[n], dst);

		/* Copy across the preferred ringsize */
		if (copy_ring_size(clone->engines[n], e->engines[n])) {
			__free_engines(clone, n + 1);
			goto err_unlock;
		}
	}
	clone->num_engines = n;

	user_engines = i915_gem_context_user_engines(src);
	i915_gem_context_unlock_engines(src);

	/* Serialised by constructor */
	engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
	if (user_engines)
		i915_gem_context_set_user_engines(dst);
	else
		i915_gem_context_clear_user_engines(dst);
	return 0;

err_unlock:
	i915_gem_context_unlock_engines(src);
	return -ENOMEM;
}

static int clone_flags(struct i915_gem_context *dst,
		       struct i915_gem_context *src)
{
	dst->user_flags = src->user_flags;
	return 0;
}

static int clone_schedattr(struct i915_gem_context *dst,
			   struct i915_gem_context *src)
{
	dst->sched = src->sched;
	return 0;
}

static int clone_sseu(struct i915_gem_context *dst,
		      struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	unsigned long n;
	int err;

	/* no locking required; sole access under constructor */
	clone = __context_engines_static(dst);
	if (e->num_engines != clone->num_engines) {
		err = -EINVAL;
		goto unlock;
	}

	for (n = 0; n < e->num_engines; n++) {
		struct intel_context *ce = e->engines[n];

		if (clone->engines[n]->engine->class != ce->engine->class) {
			/* Must have compatible engine maps! */
			err = -EINVAL;
			goto unlock;
		}

		/* serialises with set_sseu */
		err = intel_context_lock_pinned(ce);
		if (err)
			goto unlock;

		clone->engines[n]->sseu = ce->sseu;
		intel_context_unlock_pinned(ce);
	}

	err = 0;
unlock:
	i915_gem_context_unlock_engines(src);
	return err;
}

static int clone_timeline(struct i915_gem_context *dst,
			  struct i915_gem_context *src)
{
	if (src->timeline)
		__assign_timeline(dst, src->timeline);

	return 0;
}

static int clone_vm(struct i915_gem_context *dst,
		    struct i915_gem_context *src)
{
	struct i915_address_space *vm;
	int err = 0;

	if (!rcu_access_pointer(src->vm))
		return 0;

	rcu_read_lock();
	vm = context_get_vm_rcu(src);
	rcu_read_unlock();

	if (!mutex_lock_interruptible(&dst->mutex)) {
		__assign_ppgtt(dst, vm);
		mutex_unlock(&dst->mutex);
	} else {
		err = -EINTR;
	}

	i915_vm_put(vm);
	return err;
}
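
/*
 * Illustrative note (not taken from this file): clone_vm() above shares the
 * source context's ppGTT when cloning. Userspace can achieve the same sharing
 * between two existing contexts through the VM parameter, roughly as below;
 * fd, src_ctx_id and dst_ctx_id are hypothetical, and the ioctls are the
 * standard GETPARAM/SETPARAM pair handled by get_ppgtt()/set_ppgtt():
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = src_ctx_id,
 *		.param = I915_CONTEXT_PARAM_VM,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	p.ctx_id = dst_ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 * get_ppgtt() exports an id for the address space in p.value, and set_ppgtt()
 * resolves that id back to the same address space for the second context.
 */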

static int create_clone(struct i915_user_extension __user *ext, void *data)
{
	static int (* const fn[])(struct i915_gem_context *dst,
				  struct i915_gem_context *src) = {
#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
		MAP(ENGINES, clone_engines),
		MAP(FLAGS, clone_flags),
		MAP(SCHEDATTR, clone_schedattr),
		MAP(SSEU, clone_sseu),
		MAP(TIMELINE, clone_timeline),
		MAP(VM, clone_vm),
#undef MAP
	};
	struct drm_i915_gem_context_create_ext_clone local;
	const struct create_ext *arg = data;
	struct i915_gem_context *dst = arg->ctx;
	struct i915_gem_context *src;
	int err, bit;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
		     I915_CONTEXT_CLONE_UNKNOWN);

	if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
		return -EINVAL;

	if (local.rsvd)
		return -EINVAL;

	rcu_read_lock();
	src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
	rcu_read_unlock();
	if (!src)
		return -ENOENT;

	GEM_BUG_ON(src == dst);

	for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
		if (!(local.flags & BIT(bit)))
			continue;

		err = fn[bit](dst, src);
		if (err)
			return err;
	}

	return 0;
}
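
/*
 * Example (sketch only): the CLONE extension decoded by create_clone() above
 * lets a new context inherit state from an existing one. A hypothetical
 * caller copying the engine map and address space of parent_ctx_id might do
 * (fd assumed, drmIoctl() from libdrm):
 *
 *	struct drm_i915_gem_context_create_ext_clone ext = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_CLONE },
 *		.clone_id = parent_ctx_id,
 *		.flags = I915_CONTEXT_CLONE_ENGINES | I915_CONTEXT_CLONE_VM,
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (__u64)(uintptr_t)&ext,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *
 * Unknown bits in .flags and a non-zero .rsvd are rejected with -EINVAL.
 */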

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.ctx = i915_gem_create_context(i915, args->flags);
	if (IS_ERR(ext_data.ctx))
		return PTR_ERR(ext_data.ctx);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = id;
	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);

	return 0;

err_ctx:
	context_close(ext_data.ctx);
	return ret;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	context_close(ctx);
	return 0;
}

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->size = 0;
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		rcu_read_lock();
		if (rcu_access_pointer(ctx->vm))
			args->value = rcu_dereference(ctx->vm)->total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		rcu_read_unlock();
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = get_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_RINGSIZE:
		ret = get_ringsize(ctx, args);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}
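
/*
 * Example (sketch): the GTT_SIZE query above reports the size of the
 * context's address space, falling back to the global GTT when the context
 * has no private ppGTT. A hypothetical userspace probe (fd/ctx_id assumed,
 * drmIoctl() from libdrm):
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *
 * On success p.value holds the total addressable size in bytes.
 */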

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = ctx_setparam(file_priv, ctx, args);

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	if (unlikely(!e))
		return NULL;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}
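
/*
 * Sketch of in-kernel usage (assuming the for_each_gem_engine() helper
 * declared in i915_gem_context.h, which wraps the iterator above): walk a
 * context's engine map under the engines lock, with NULL slots skipped by
 * i915_gem_engines_iter_next():
 *
 *	struct i915_gem_engines_iter it;
 *	struct intel_context *ce;
 *
 *	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
 *		... use ce ...
 *	}
 *	i915_gem_context_unlock_engines(ctx);
 */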
2576 */ 2577 2578 if (capable(CAP_SYS_ADMIN)) 2579 args->reset_count = i915_reset_count(&i915->gpu_error); 2580 else 2581 args->reset_count = 0; 2582 2583 args->batch_active = atomic_read(&ctx->guilty_count); 2584 args->batch_pending = atomic_read(&ctx->active_count); 2585 2586 ret = 0; 2587 out: 2588 rcu_read_unlock(); 2589 return ret; 2590 } 2591 2592 /* GEM context-engines iterator: for_each_gem_engine() */ 2593 struct intel_context * 2594 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) 2595 { 2596 const struct i915_gem_engines *e = it->engines; 2597 struct intel_context *ctx; 2598 2599 if (unlikely(!e)) 2600 return NULL; 2601 2602 do { 2603 if (it->idx >= e->num_engines) 2604 return NULL; 2605 2606 ctx = e->engines[it->idx++]; 2607 } while (!ctx); 2608 2609 return ctx; 2610 } 2611 2612 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 2613 #include "selftests/mock_context.c" 2614 #include "selftests/i915_gem_context.c" 2615 #endif 2616 2617 static void i915_global_gem_context_shrink(void) 2618 { 2619 kmem_cache_shrink(global.slab_luts); 2620 } 2621 2622 static void i915_global_gem_context_exit(void) 2623 { 2624 kmem_cache_destroy(global.slab_luts); 2625 } 2626 2627 static struct i915_global_gem_context global = { { 2628 .shrink = i915_global_gem_context_shrink, 2629 .exit = i915_global_gem_context_exit, 2630 } }; 2631 2632 int __init i915_global_gem_context_init(void) 2633 { 2634 global.slab_luts = KMEM_CACHE(i915_lut_handle, 0); 2635 if (!global.slab_luts) 2636 return -ENOMEM; 2637 2638 i915_global_register(&global.base); 2639 return 0; 2640 } 2641