/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially to query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits an execbuf with the context
 * S2->S3: another client submits an execbuf with its own context
 * S3->S1: the context object was retired
 * S3->S2: the client submits another execbuf with the context
 * S2->S4: context destroy called on the current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on the current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 */
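/*
 * Illustration only, not part of the driver: a rough sketch of the uAPI
 * sequence that walks the state machine above from userspace. The ioctl
 * numbers and structs below come from include/uapi/drm/i915_drm.h; the
 * drm fd and the execbuffer setup are assumed to exist elsewhere.
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	// S0->S1: create a context
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *
 *	// S1->S2: submit work against it (execbuffer2.rsvd1 carries the id)
 *	i915_execbuffer2_set_context_id(execbuf2, create.ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf2);
 *
 *	// S2->S4 (or S3->S5): destroy; the HW context object may outlive
 *	// this call until the GPU has switched away and retired it.
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */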
#include <linux/log2.h>
#include <linux/nospec.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_ring.h"

#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

static struct i915_global_gem_context {
	struct i915_global base;
	struct kmem_cache *slab_luts;
} global;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	kmem_cache_free(global.slab_luts, lut);
}

/*
 * Drop this context's handle->vma lookup cache: unlink each LUT entry
 * from its object and close the VMA it pointed at.
 */
static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	mutex_lock(&ctx->lut_mutex);
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_lock(&obj->lut_lock);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);

		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			i915_vma_close(vma);
			i915_gem_object_put(obj);
		}

		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
	mutex_unlock(&ctx->lut_mutex);
}

static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}
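/*
 * Acquire a reference to the ppgtt currently assigned to the context.
 * ctx->vm is only stable under RCU, so retry until the pointer we read
 * and the reference we managed to take are known to refer to the same
 * address space; see the comment in the loop below.
 */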
static struct i915_address_space *
context_get_vm_rcu(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!rcu_access_pointer(ctx->vm));

	do {
		struct i915_address_space *vm;

		/*
		 * We do not allow downgrading from full-ppgtt [to a shared
		 * global gtt], so ctx->vm cannot become NULL.
		 */
		vm = rcu_dereference(ctx->vm);
		if (!kref_get_unless_zero(&vm->ref))
			continue;

		/*
		 * This ppgtt may have been reallocated between
		 * the read and the kref, and reassigned to a third
		 * context. In order to avoid inadvertent sharing
		 * of this ppgtt with that third context (and not
		 * ctx), we have to confirm that we have the same
		 * ppgtt after passing through the strong memory
		 * barrier implied by a successful
		 * kref_get_unless_zero().
		 *
		 * Once we have acquired the current ppgtt of ctx,
		 * we no longer care if it is released from ctx, as
		 * it cannot be reallocated elsewhere.
		 */

		if (vm == rcu_access_pointer(ctx->vm))
			return rcu_pointer_handoff(vm);

		i915_vm_put(vm);
	} while (1);
}

static void intel_context_set_gem(struct intel_context *ce,
				  struct i915_gem_context *ctx)
{
	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
		ce->ring = __intel_context_ring_size(SZ_16K);

	if (rcu_access_pointer(ctx->vm)) {
		struct i915_address_space *vm;

		rcu_read_lock();
		vm = context_get_vm_rcu(ctx); /* hmm */
		rcu_read_unlock();

		i915_vm_put(ce->vm);
		ce->vm = vm;
	}

	GEM_BUG_ON(ce->timeline);
	if (ctx->timeline)
		ce->timeline = intel_timeline_get(ctx->timeline);

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_timeslices(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		if (!e->engines[count])
			continue;

		intel_context_put(e->engines[count]);
	}
	kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}

static void free_engines_rcu(struct rcu_head *rcu)
{
	struct i915_gem_engines *engines =
		container_of(rcu, struct i915_gem_engines, rcu);

	i915_sw_fence_fini(&engines->fence);
	free_engines(engines);
}

static int __i915_sw_fence_call
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_gem_engines *engines =
		container_of(fence, typeof(*engines), fence);

	switch (state) {
	case FENCE_COMPLETE:
		if (!list_empty(&engines->link)) {
			struct i915_gem_context *ctx = engines->ctx;
			unsigned long flags;

			spin_lock_irqsave(&ctx->stale.lock, flags);
			list_del(&engines->link);
			spin_unlock_irqrestore(&ctx->stale.lock, flags);
		}
		i915_gem_context_put(engines->ctx);
		break;

	case FENCE_FREE:
		init_rcu_head(&engines->rcu);
		call_rcu(&engines->rcu, free_engines_rcu);
		break;
	}

	return NOTIFY_DONE;
}

static struct i915_gem_engines *alloc_engines(unsigned int count)
{
	struct i915_gem_engines *e;

	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
	if (!e)
		return NULL;

	i915_sw_fence_init(&e->fence, engines_notify);
	return e;
}

static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
	const struct intel_gt *gt = &ctx->i915->gt;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e;
	enum intel_engine_id id;

	e = alloc_engines(I915_NUM_ENGINES);
	if (!e)
		return ERR_PTR(-ENOMEM);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			__free_engines(e, e->num_engines + 1);
			return ERR_CAST(ce);
		}

		intel_context_set_gem(ce, ctx);

		e->engines[engine->legacy_idx] = ce;
		e->num_engines = max(e->num_engines, engine->legacy_idx);
	}
	e->num_engines++;

	return e;
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
339 340 spin_lock(&ctx->i915->gem.contexts.lock); 341 list_del(&ctx->link); 342 spin_unlock(&ctx->i915->gem.contexts.lock); 343 344 mutex_destroy(&ctx->engines_mutex); 345 mutex_destroy(&ctx->lut_mutex); 346 347 if (ctx->timeline) 348 intel_timeline_put(ctx->timeline); 349 350 put_pid(ctx->pid); 351 mutex_destroy(&ctx->mutex); 352 353 kfree_rcu(ctx, rcu); 354 } 355 356 static void contexts_free_all(struct llist_node *list) 357 { 358 struct i915_gem_context *ctx, *cn; 359 360 llist_for_each_entry_safe(ctx, cn, list, free_link) 361 i915_gem_context_free(ctx); 362 } 363 364 static void contexts_flush_free(struct i915_gem_contexts *gc) 365 { 366 contexts_free_all(llist_del_all(&gc->free_list)); 367 } 368 369 static void contexts_free_worker(struct work_struct *work) 370 { 371 struct i915_gem_contexts *gc = 372 container_of(work, typeof(*gc), free_work); 373 374 contexts_flush_free(gc); 375 } 376 377 void i915_gem_context_release(struct kref *ref) 378 { 379 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); 380 struct i915_gem_contexts *gc = &ctx->i915->gem.contexts; 381 382 trace_i915_context_free(ctx); 383 if (llist_add(&ctx->free_link, &gc->free_list)) 384 schedule_work(&gc->free_work); 385 } 386 387 static inline struct i915_gem_engines * 388 __context_engines_static(const struct i915_gem_context *ctx) 389 { 390 return rcu_dereference_protected(ctx->engines, true); 391 } 392 393 static bool __reset_engine(struct intel_engine_cs *engine) 394 { 395 struct intel_gt *gt = engine->gt; 396 bool success = false; 397 398 if (!intel_has_reset_engine(gt)) 399 return false; 400 401 if (!test_and_set_bit(I915_RESET_ENGINE + engine->id, 402 >->reset.flags)) { 403 success = intel_engine_reset(engine, NULL) == 0; 404 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, 405 >->reset.flags); 406 } 407 408 return success; 409 } 410 411 static void __reset_context(struct i915_gem_context *ctx, 412 struct intel_engine_cs *engine) 413 { 414 intel_gt_handle_error(engine->gt, engine->mask, 0, 415 "context closure in %s", ctx->name); 416 } 417 418 static bool __cancel_engine(struct intel_engine_cs *engine) 419 { 420 /* 421 * Send a "high priority pulse" down the engine to cause the 422 * current request to be momentarily preempted. (If it fails to 423 * be preempted, it will be reset). As we have marked our context 424 * as banned, any incomplete request, including any running, will 425 * be skipped following the preemption. 426 * 427 * If there is no hangchecking (one of the reasons why we try to 428 * cancel the context) and no forced preemption, there may be no 429 * means by which we reset the GPU and evict the persistent hog. 430 * Ergo if we are unable to inject a preemptive pulse that can 431 * kill the banned context, we fallback to doing a local reset 432 * instead. 433 */ 434 if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) && 435 !intel_engine_pulse(engine)) 436 return true; 437 438 /* If we are unable to send a pulse, try resetting this engine. */ 439 return __reset_engine(engine); 440 } 441 442 static bool 443 __active_engine(struct i915_request *rq, struct intel_engine_cs **active) 444 { 445 struct intel_engine_cs *engine, *locked; 446 bool ret = false; 447 448 /* 449 * Serialise with __i915_request_submit() so that it sees 450 * is-banned?, or we know the request is already inflight. 451 * 452 * Note that rq->engine is unstable, and so we double 453 * check that we have acquired the lock on the final engine. 
454 */ 455 locked = READ_ONCE(rq->engine); 456 spin_lock_irq(&locked->active.lock); 457 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { 458 spin_unlock(&locked->active.lock); 459 locked = engine; 460 spin_lock(&locked->active.lock); 461 } 462 463 if (!i915_request_completed(rq)) { 464 if (i915_request_is_active(rq) && rq->fence.error != -EIO) 465 *active = locked; 466 ret = true; 467 } 468 469 spin_unlock_irq(&locked->active.lock); 470 471 return ret; 472 } 473 474 static struct intel_engine_cs *active_engine(struct intel_context *ce) 475 { 476 struct intel_engine_cs *engine = NULL; 477 struct i915_request *rq; 478 479 if (!ce->timeline) 480 return NULL; 481 482 rcu_read_lock(); 483 list_for_each_entry_rcu(rq, &ce->timeline->requests, link) { 484 if (i915_request_is_active(rq) && i915_request_completed(rq)) 485 continue; 486 487 /* Check with the backend if the request is inflight */ 488 if (__active_engine(rq, &engine)) 489 break; 490 } 491 rcu_read_unlock(); 492 493 return engine; 494 } 495 496 static void kill_engines(struct i915_gem_engines *engines) 497 { 498 struct i915_gem_engines_iter it; 499 struct intel_context *ce; 500 501 /* 502 * Map the user's engine back to the actual engines; one virtual 503 * engine will be mapped to multiple engines, and using ctx->engine[] 504 * the same engine may be have multiple instances in the user's map. 505 * However, we only care about pending requests, so only include 506 * engines on which there are incomplete requests. 507 */ 508 for_each_gem_engine(ce, engines, it) { 509 struct intel_engine_cs *engine; 510 511 if (intel_context_set_banned(ce)) 512 continue; 513 514 /* 515 * Check the current active state of this context; if we 516 * are currently executing on the GPU we need to evict 517 * ourselves. On the other hand, if we haven't yet been 518 * submitted to the GPU or if everything is complete, 519 * we have nothing to do. 520 */ 521 engine = active_engine(ce); 522 523 /* First attempt to gracefully cancel the context */ 524 if (engine && !__cancel_engine(engine)) 525 /* 526 * If we are unable to send a preemptive pulse to bump 527 * the context from the GPU, we have to resort to a full 528 * reset. We hope the collateral damage is worth it. 
529 */ 530 __reset_context(engines->ctx, engine); 531 } 532 } 533 534 static void kill_stale_engines(struct i915_gem_context *ctx) 535 { 536 struct i915_gem_engines *pos, *next; 537 538 spin_lock_irq(&ctx->stale.lock); 539 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 540 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) { 541 if (!i915_sw_fence_await(&pos->fence)) { 542 list_del_init(&pos->link); 543 continue; 544 } 545 546 spin_unlock_irq(&ctx->stale.lock); 547 548 kill_engines(pos); 549 550 spin_lock_irq(&ctx->stale.lock); 551 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); 552 list_safe_reset_next(pos, next, link); 553 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */ 554 555 i915_sw_fence_complete(&pos->fence); 556 } 557 spin_unlock_irq(&ctx->stale.lock); 558 } 559 560 static void kill_context(struct i915_gem_context *ctx) 561 { 562 kill_stale_engines(ctx); 563 } 564 565 static void engines_idle_release(struct i915_gem_context *ctx, 566 struct i915_gem_engines *engines) 567 { 568 struct i915_gem_engines_iter it; 569 struct intel_context *ce; 570 571 INIT_LIST_HEAD(&engines->link); 572 573 engines->ctx = i915_gem_context_get(ctx); 574 575 for_each_gem_engine(ce, engines, it) { 576 int err; 577 578 /* serialises with execbuf */ 579 set_bit(CONTEXT_CLOSED_BIT, &ce->flags); 580 if (!intel_context_pin_if_active(ce)) 581 continue; 582 583 /* Wait until context is finally scheduled out and retired */ 584 err = i915_sw_fence_await_active(&engines->fence, 585 &ce->active, 586 I915_ACTIVE_AWAIT_BARRIER); 587 intel_context_unpin(ce); 588 if (err) 589 goto kill; 590 } 591 592 spin_lock_irq(&ctx->stale.lock); 593 if (!i915_gem_context_is_closed(ctx)) 594 list_add_tail(&engines->link, &ctx->stale.engines); 595 spin_unlock_irq(&ctx->stale.lock); 596 597 kill: 598 if (list_empty(&engines->link)) /* raced, already closed */ 599 kill_engines(engines); 600 601 i915_sw_fence_commit(&engines->fence); 602 } 603 604 static void set_closed_name(struct i915_gem_context *ctx) 605 { 606 char *s; 607 608 /* Replace '[]' with '<>' to indicate closed in debug prints */ 609 610 s = strrchr(ctx->name, '['); 611 if (!s) 612 return; 613 614 *s = '<'; 615 616 s = strchr(s + 1, ']'); 617 if (s) 618 *s = '>'; 619 } 620 621 static void context_close(struct i915_gem_context *ctx) 622 { 623 struct i915_address_space *vm; 624 625 /* Flush any concurrent set_engines() */ 626 mutex_lock(&ctx->engines_mutex); 627 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); 628 i915_gem_context_set_closed(ctx); 629 mutex_unlock(&ctx->engines_mutex); 630 631 mutex_lock(&ctx->mutex); 632 633 set_closed_name(ctx); 634 635 vm = i915_gem_context_vm(ctx); 636 if (vm) 637 i915_vm_close(vm); 638 639 ctx->file_priv = ERR_PTR(-EBADF); 640 641 /* 642 * The LUT uses the VMA as a backpointer to unref the object, 643 * so we need to clear the LUT before we close all the VMA (inside 644 * the ppgtt). 645 */ 646 lut_close(ctx); 647 648 mutex_unlock(&ctx->mutex); 649 650 /* 651 * If the user has disabled hangchecking, we can not be sure that 652 * the batches will ever complete after the context is closed, 653 * keeping the context and all resources pinned forever. So in this 654 * case we opt to forcibly kill off all remaining requests on 655 * context close. 
656 */ 657 if (!i915_gem_context_is_persistent(ctx) || 658 !ctx->i915->params.enable_hangcheck) 659 kill_context(ctx); 660 661 i915_gem_context_put(ctx); 662 } 663 664 static int __context_set_persistence(struct i915_gem_context *ctx, bool state) 665 { 666 if (i915_gem_context_is_persistent(ctx) == state) 667 return 0; 668 669 if (state) { 670 /* 671 * Only contexts that are short-lived [that will expire or be 672 * reset] are allowed to survive past termination. We require 673 * hangcheck to ensure that the persistent requests are healthy. 674 */ 675 if (!ctx->i915->params.enable_hangcheck) 676 return -EINVAL; 677 678 i915_gem_context_set_persistence(ctx); 679 } else { 680 /* To cancel a context we use "preempt-to-idle" */ 681 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) 682 return -ENODEV; 683 684 /* 685 * If the cancel fails, we then need to reset, cleanly! 686 * 687 * If the per-engine reset fails, all hope is lost! We resort 688 * to a full GPU reset in that unlikely case, but realistically 689 * if the engine could not reset, the full reset does not fare 690 * much better. The damage has been done. 691 * 692 * However, if we cannot reset an engine by itself, we cannot 693 * cleanup a hanging persistent context without causing 694 * colateral damage, and we should not pretend we can by 695 * exposing the interface. 696 */ 697 if (!intel_has_reset_engine(&ctx->i915->gt)) 698 return -ENODEV; 699 700 i915_gem_context_clear_persistence(ctx); 701 } 702 703 return 0; 704 } 705 706 static struct i915_gem_context * 707 __create_context(struct drm_i915_private *i915) 708 { 709 struct i915_gem_context *ctx; 710 struct i915_gem_engines *e; 711 int err; 712 int i; 713 714 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 715 if (!ctx) 716 return ERR_PTR(-ENOMEM); 717 718 kref_init(&ctx->ref); 719 ctx->i915 = i915; 720 ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); 721 mutex_init(&ctx->mutex); 722 INIT_LIST_HEAD(&ctx->link); 723 724 spin_lock_init(&ctx->stale.lock); 725 INIT_LIST_HEAD(&ctx->stale.engines); 726 727 mutex_init(&ctx->engines_mutex); 728 e = default_engines(ctx); 729 if (IS_ERR(e)) { 730 err = PTR_ERR(e); 731 goto err_free; 732 } 733 RCU_INIT_POINTER(ctx->engines, e); 734 735 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); 736 mutex_init(&ctx->lut_mutex); 737 738 /* NB: Mark all slices as needing a remap so that when the context first 739 * loads it will restore whatever remap state already exists. If there 740 * is no remap info, it will be a NOP. */ 741 ctx->remap_slice = ALL_L3_SLICES(i915); 742 743 i915_gem_context_set_bannable(ctx); 744 i915_gem_context_set_recoverable(ctx); 745 __context_set_persistence(ctx, true /* cgroup hook? 
*/); 746 747 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) 748 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; 749 750 return ctx; 751 752 err_free: 753 kfree(ctx); 754 return ERR_PTR(err); 755 } 756 757 static inline struct i915_gem_engines * 758 __context_engines_await(const struct i915_gem_context *ctx) 759 { 760 struct i915_gem_engines *engines; 761 762 rcu_read_lock(); 763 do { 764 engines = rcu_dereference(ctx->engines); 765 GEM_BUG_ON(!engines); 766 767 if (unlikely(!i915_sw_fence_await(&engines->fence))) 768 continue; 769 770 if (likely(engines == rcu_access_pointer(ctx->engines))) 771 break; 772 773 i915_sw_fence_complete(&engines->fence); 774 } while (1); 775 rcu_read_unlock(); 776 777 return engines; 778 } 779 780 static int 781 context_apply_all(struct i915_gem_context *ctx, 782 int (*fn)(struct intel_context *ce, void *data), 783 void *data) 784 { 785 struct i915_gem_engines_iter it; 786 struct i915_gem_engines *e; 787 struct intel_context *ce; 788 int err = 0; 789 790 e = __context_engines_await(ctx); 791 for_each_gem_engine(ce, e, it) { 792 err = fn(ce, data); 793 if (err) 794 break; 795 } 796 i915_sw_fence_complete(&e->fence); 797 798 return err; 799 } 800 801 static int __apply_ppgtt(struct intel_context *ce, void *vm) 802 { 803 i915_vm_put(ce->vm); 804 ce->vm = i915_vm_get(vm); 805 return 0; 806 } 807 808 static struct i915_address_space * 809 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) 810 { 811 struct i915_address_space *old; 812 813 old = rcu_replace_pointer(ctx->vm, 814 i915_vm_open(vm), 815 lockdep_is_held(&ctx->mutex)); 816 GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old)); 817 818 context_apply_all(ctx, __apply_ppgtt, vm); 819 820 return old; 821 } 822 823 static void __assign_ppgtt(struct i915_gem_context *ctx, 824 struct i915_address_space *vm) 825 { 826 if (vm == rcu_access_pointer(ctx->vm)) 827 return; 828 829 vm = __set_ppgtt(ctx, vm); 830 if (vm) 831 i915_vm_close(vm); 832 } 833 834 static void __set_timeline(struct intel_timeline **dst, 835 struct intel_timeline *src) 836 { 837 struct intel_timeline *old = *dst; 838 839 *dst = src ? 
intel_timeline_get(src) : NULL; 840 841 if (old) 842 intel_timeline_put(old); 843 } 844 845 static int __apply_timeline(struct intel_context *ce, void *timeline) 846 { 847 __set_timeline(&ce->timeline, timeline); 848 return 0; 849 } 850 851 static void __assign_timeline(struct i915_gem_context *ctx, 852 struct intel_timeline *timeline) 853 { 854 __set_timeline(&ctx->timeline, timeline); 855 context_apply_all(ctx, __apply_timeline, timeline); 856 } 857 858 static struct i915_gem_context * 859 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) 860 { 861 struct i915_gem_context *ctx; 862 863 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && 864 !HAS_EXECLISTS(i915)) 865 return ERR_PTR(-EINVAL); 866 867 /* Reap the stale contexts */ 868 contexts_flush_free(&i915->gem.contexts); 869 870 ctx = __create_context(i915); 871 if (IS_ERR(ctx)) 872 return ctx; 873 874 if (HAS_FULL_PPGTT(i915)) { 875 struct i915_ppgtt *ppgtt; 876 877 ppgtt = i915_ppgtt_create(&i915->gt); 878 if (IS_ERR(ppgtt)) { 879 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", 880 PTR_ERR(ppgtt)); 881 context_close(ctx); 882 return ERR_CAST(ppgtt); 883 } 884 885 mutex_lock(&ctx->mutex); 886 __assign_ppgtt(ctx, &ppgtt->vm); 887 mutex_unlock(&ctx->mutex); 888 889 i915_vm_put(&ppgtt->vm); 890 } 891 892 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { 893 struct intel_timeline *timeline; 894 895 timeline = intel_timeline_create(&i915->gt); 896 if (IS_ERR(timeline)) { 897 context_close(ctx); 898 return ERR_CAST(timeline); 899 } 900 901 __assign_timeline(ctx, timeline); 902 intel_timeline_put(timeline); 903 } 904 905 trace_i915_context_create(ctx); 906 907 return ctx; 908 } 909 910 static void init_contexts(struct i915_gem_contexts *gc) 911 { 912 spin_lock_init(&gc->lock); 913 INIT_LIST_HEAD(&gc->list); 914 915 INIT_WORK(&gc->free_work, contexts_free_worker); 916 init_llist_head(&gc->free_list); 917 } 918 919 void i915_gem_init__contexts(struct drm_i915_private *i915) 920 { 921 init_contexts(&i915->gem.contexts); 922 drm_dbg(&i915->drm, "%s context support initialized\n", 923 DRIVER_CAPS(i915)->has_logical_contexts ? 
924 "logical" : "fake"); 925 } 926 927 void i915_gem_driver_release__contexts(struct drm_i915_private *i915) 928 { 929 flush_work(&i915->gem.contexts.free_work); 930 rcu_barrier(); /* and flush the left over RCU frees */ 931 } 932 933 static int gem_context_register(struct i915_gem_context *ctx, 934 struct drm_i915_file_private *fpriv, 935 u32 *id) 936 { 937 struct drm_i915_private *i915 = ctx->i915; 938 struct i915_address_space *vm; 939 int ret; 940 941 ctx->file_priv = fpriv; 942 943 mutex_lock(&ctx->mutex); 944 vm = i915_gem_context_vm(ctx); 945 if (vm) 946 WRITE_ONCE(vm->file, fpriv); /* XXX */ 947 mutex_unlock(&ctx->mutex); 948 949 ctx->pid = get_task_pid(current, PIDTYPE_PID); 950 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 951 current->comm, pid_nr(ctx->pid)); 952 953 /* And finally expose ourselves to userspace via the idr */ 954 ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL); 955 if (ret) 956 goto err_pid; 957 958 spin_lock(&i915->gem.contexts.lock); 959 list_add_tail(&ctx->link, &i915->gem.contexts.list); 960 spin_unlock(&i915->gem.contexts.lock); 961 962 return 0; 963 964 err_pid: 965 put_pid(fetch_and_zero(&ctx->pid)); 966 return ret; 967 } 968 969 int i915_gem_context_open(struct drm_i915_private *i915, 970 struct drm_file *file) 971 { 972 struct drm_i915_file_private *file_priv = file->driver_priv; 973 struct i915_gem_context *ctx; 974 int err; 975 u32 id; 976 977 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); 978 979 /* 0 reserved for invalid/unassigned ppgtt */ 980 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); 981 982 ctx = i915_gem_create_context(i915, 0); 983 if (IS_ERR(ctx)) { 984 err = PTR_ERR(ctx); 985 goto err; 986 } 987 988 err = gem_context_register(ctx, file_priv, &id); 989 if (err < 0) 990 goto err_ctx; 991 992 GEM_BUG_ON(id); 993 return 0; 994 995 err_ctx: 996 context_close(ctx); 997 err: 998 xa_destroy(&file_priv->vm_xa); 999 xa_destroy(&file_priv->context_xa); 1000 return err; 1001 } 1002 1003 void i915_gem_context_close(struct drm_file *file) 1004 { 1005 struct drm_i915_file_private *file_priv = file->driver_priv; 1006 struct drm_i915_private *i915 = file_priv->dev_priv; 1007 struct i915_address_space *vm; 1008 struct i915_gem_context *ctx; 1009 unsigned long idx; 1010 1011 xa_for_each(&file_priv->context_xa, idx, ctx) 1012 context_close(ctx); 1013 xa_destroy(&file_priv->context_xa); 1014 1015 xa_for_each(&file_priv->vm_xa, idx, vm) 1016 i915_vm_put(vm); 1017 xa_destroy(&file_priv->vm_xa); 1018 1019 contexts_flush_free(&i915->gem.contexts); 1020 } 1021 1022 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, 1023 struct drm_file *file) 1024 { 1025 struct drm_i915_private *i915 = to_i915(dev); 1026 struct drm_i915_gem_vm_control *args = data; 1027 struct drm_i915_file_private *file_priv = file->driver_priv; 1028 struct i915_ppgtt *ppgtt; 1029 u32 id; 1030 int err; 1031 1032 if (!HAS_FULL_PPGTT(i915)) 1033 return -ENODEV; 1034 1035 if (args->flags) 1036 return -EINVAL; 1037 1038 ppgtt = i915_ppgtt_create(&i915->gt); 1039 if (IS_ERR(ppgtt)) 1040 return PTR_ERR(ppgtt); 1041 1042 ppgtt->vm.file = file_priv; 1043 1044 if (args->extensions) { 1045 err = i915_user_extensions(u64_to_user_ptr(args->extensions), 1046 NULL, 0, 1047 ppgtt); 1048 if (err) 1049 goto err_put; 1050 } 1051 1052 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, 1053 xa_limit_32b, GFP_KERNEL); 1054 if (err) 1055 goto err_put; 1056 1057 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 1058 args->vm_id = id; 1059 return 0; 1060 
1061 err_put: 1062 i915_vm_put(&ppgtt->vm); 1063 return err; 1064 } 1065 1066 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, 1067 struct drm_file *file) 1068 { 1069 struct drm_i915_file_private *file_priv = file->driver_priv; 1070 struct drm_i915_gem_vm_control *args = data; 1071 struct i915_address_space *vm; 1072 1073 if (args->flags) 1074 return -EINVAL; 1075 1076 if (args->extensions) 1077 return -EINVAL; 1078 1079 vm = xa_erase(&file_priv->vm_xa, args->vm_id); 1080 if (!vm) 1081 return -ENOENT; 1082 1083 i915_vm_put(vm); 1084 return 0; 1085 } 1086 1087 struct context_barrier_task { 1088 struct i915_active base; 1089 void (*task)(void *data); 1090 void *data; 1091 }; 1092 1093 __i915_active_call 1094 static void cb_retire(struct i915_active *base) 1095 { 1096 struct context_barrier_task *cb = container_of(base, typeof(*cb), base); 1097 1098 if (cb->task) 1099 cb->task(cb->data); 1100 1101 i915_active_fini(&cb->base); 1102 kfree(cb); 1103 } 1104 1105 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); 1106 static int context_barrier_task(struct i915_gem_context *ctx, 1107 intel_engine_mask_t engines, 1108 bool (*skip)(struct intel_context *ce, void *data), 1109 int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data), 1110 int (*emit)(struct i915_request *rq, void *data), 1111 void (*task)(void *data), 1112 void *data) 1113 { 1114 struct context_barrier_task *cb; 1115 struct i915_gem_engines_iter it; 1116 struct i915_gem_engines *e; 1117 struct i915_gem_ww_ctx ww; 1118 struct intel_context *ce; 1119 int err = 0; 1120 1121 GEM_BUG_ON(!task); 1122 1123 cb = kmalloc(sizeof(*cb), GFP_KERNEL); 1124 if (!cb) 1125 return -ENOMEM; 1126 1127 i915_active_init(&cb->base, NULL, cb_retire); 1128 err = i915_active_acquire(&cb->base); 1129 if (err) { 1130 kfree(cb); 1131 return err; 1132 } 1133 1134 e = __context_engines_await(ctx); 1135 if (!e) { 1136 i915_active_release(&cb->base); 1137 return -ENOENT; 1138 } 1139 1140 for_each_gem_engine(ce, e, it) { 1141 struct i915_request *rq; 1142 1143 if (I915_SELFTEST_ONLY(context_barrier_inject_fault & 1144 ce->engine->mask)) { 1145 err = -ENXIO; 1146 break; 1147 } 1148 1149 if (!(ce->engine->mask & engines)) 1150 continue; 1151 1152 if (skip && skip(ce, data)) 1153 continue; 1154 1155 i915_gem_ww_ctx_init(&ww, true); 1156 retry: 1157 err = intel_context_pin_ww(ce, &ww); 1158 if (err) 1159 goto err; 1160 1161 if (pin) 1162 err = pin(ce, &ww, data); 1163 if (err) 1164 goto err_unpin; 1165 1166 rq = i915_request_create(ce); 1167 if (IS_ERR(rq)) { 1168 err = PTR_ERR(rq); 1169 goto err_unpin; 1170 } 1171 1172 err = 0; 1173 if (emit) 1174 err = emit(rq, data); 1175 if (err == 0) 1176 err = i915_active_add_request(&cb->base, rq); 1177 1178 i915_request_add(rq); 1179 err_unpin: 1180 intel_context_unpin(ce); 1181 err: 1182 if (err == -EDEADLK) { 1183 err = i915_gem_ww_ctx_backoff(&ww); 1184 if (!err) 1185 goto retry; 1186 } 1187 i915_gem_ww_ctx_fini(&ww); 1188 1189 if (err) 1190 break; 1191 } 1192 i915_sw_fence_complete(&e->fence); 1193 1194 cb->task = err ? 
NULL : task; /* caller needs to unwind instead */ 1195 cb->data = data; 1196 1197 i915_active_release(&cb->base); 1198 1199 return err; 1200 } 1201 1202 static int get_ppgtt(struct drm_i915_file_private *file_priv, 1203 struct i915_gem_context *ctx, 1204 struct drm_i915_gem_context_param *args) 1205 { 1206 struct i915_address_space *vm; 1207 int err; 1208 u32 id; 1209 1210 if (!rcu_access_pointer(ctx->vm)) 1211 return -ENODEV; 1212 1213 rcu_read_lock(); 1214 vm = context_get_vm_rcu(ctx); 1215 rcu_read_unlock(); 1216 if (!vm) 1217 return -ENODEV; 1218 1219 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); 1220 if (err) 1221 goto err_put; 1222 1223 i915_vm_open(vm); 1224 1225 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */ 1226 args->value = id; 1227 args->size = 0; 1228 1229 err_put: 1230 i915_vm_put(vm); 1231 return err; 1232 } 1233 1234 static void set_ppgtt_barrier(void *data) 1235 { 1236 struct i915_address_space *old = data; 1237 1238 if (INTEL_GEN(old->i915) < 8) 1239 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); 1240 1241 i915_vm_close(old); 1242 } 1243 1244 static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data) 1245 { 1246 struct i915_address_space *vm = ce->vm; 1247 1248 if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915)) 1249 /* ppGTT is not part of the legacy context image */ 1250 return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww); 1251 1252 return 0; 1253 } 1254 1255 static int emit_ppgtt_update(struct i915_request *rq, void *data) 1256 { 1257 struct i915_address_space *vm = rq->context->vm; 1258 struct intel_engine_cs *engine = rq->engine; 1259 u32 base = engine->mmio_base; 1260 u32 *cs; 1261 int i; 1262 1263 if (i915_vm_is_4lvl(vm)) { 1264 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1265 const dma_addr_t pd_daddr = px_dma(ppgtt->pd); 1266 1267 cs = intel_ring_begin(rq, 6); 1268 if (IS_ERR(cs)) 1269 return PTR_ERR(cs); 1270 1271 *cs++ = MI_LOAD_REGISTER_IMM(2); 1272 1273 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); 1274 *cs++ = upper_32_bits(pd_daddr); 1275 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); 1276 *cs++ = lower_32_bits(pd_daddr); 1277 1278 *cs++ = MI_NOOP; 1279 intel_ring_advance(rq, cs); 1280 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { 1281 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1282 int err; 1283 1284 /* Magic required to prevent forcewake errors! 
*/ 1285 err = engine->emit_flush(rq, EMIT_INVALIDATE); 1286 if (err) 1287 return err; 1288 1289 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); 1290 if (IS_ERR(cs)) 1291 return PTR_ERR(cs); 1292 1293 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; 1294 for (i = GEN8_3LVL_PDPES; i--; ) { 1295 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 1296 1297 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); 1298 *cs++ = upper_32_bits(pd_daddr); 1299 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); 1300 *cs++ = lower_32_bits(pd_daddr); 1301 } 1302 *cs++ = MI_NOOP; 1303 intel_ring_advance(rq, cs); 1304 } 1305 1306 return 0; 1307 } 1308 1309 static bool skip_ppgtt_update(struct intel_context *ce, void *data) 1310 { 1311 if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) 1312 return !ce->state; 1313 else 1314 return !atomic_read(&ce->pin_count); 1315 } 1316 1317 static int set_ppgtt(struct drm_i915_file_private *file_priv, 1318 struct i915_gem_context *ctx, 1319 struct drm_i915_gem_context_param *args) 1320 { 1321 struct i915_address_space *vm, *old; 1322 int err; 1323 1324 if (args->size) 1325 return -EINVAL; 1326 1327 if (!rcu_access_pointer(ctx->vm)) 1328 return -ENODEV; 1329 1330 if (upper_32_bits(args->value)) 1331 return -ENOENT; 1332 1333 rcu_read_lock(); 1334 vm = xa_load(&file_priv->vm_xa, args->value); 1335 if (vm && !kref_get_unless_zero(&vm->ref)) 1336 vm = NULL; 1337 rcu_read_unlock(); 1338 if (!vm) 1339 return -ENOENT; 1340 1341 err = mutex_lock_interruptible(&ctx->mutex); 1342 if (err) 1343 goto out; 1344 1345 if (i915_gem_context_is_closed(ctx)) { 1346 err = -ENOENT; 1347 goto unlock; 1348 } 1349 1350 if (vm == rcu_access_pointer(ctx->vm)) 1351 goto unlock; 1352 1353 old = __set_ppgtt(ctx, vm); 1354 1355 /* Teardown the existing obj:vma cache, it will have to be rebuilt. */ 1356 lut_close(ctx); 1357 1358 /* 1359 * We need to flush any requests using the current ppgtt before 1360 * we release it as the requests do not hold a reference themselves, 1361 * only indirectly through the context. 
1362 */ 1363 err = context_barrier_task(ctx, ALL_ENGINES, 1364 skip_ppgtt_update, 1365 pin_ppgtt_update, 1366 emit_ppgtt_update, 1367 set_ppgtt_barrier, 1368 old); 1369 if (err) { 1370 i915_vm_close(__set_ppgtt(ctx, old)); 1371 i915_vm_close(old); 1372 lut_close(ctx); /* force a rebuild of the old obj:vma cache */ 1373 } 1374 1375 unlock: 1376 mutex_unlock(&ctx->mutex); 1377 out: 1378 i915_vm_put(vm); 1379 return err; 1380 } 1381 1382 static int __apply_ringsize(struct intel_context *ce, void *sz) 1383 { 1384 return intel_context_set_ring_size(ce, (unsigned long)sz); 1385 } 1386 1387 static int set_ringsize(struct i915_gem_context *ctx, 1388 struct drm_i915_gem_context_param *args) 1389 { 1390 if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915)) 1391 return -ENODEV; 1392 1393 if (args->size) 1394 return -EINVAL; 1395 1396 if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE)) 1397 return -EINVAL; 1398 1399 if (args->value < I915_GTT_PAGE_SIZE) 1400 return -EINVAL; 1401 1402 if (args->value > 128 * I915_GTT_PAGE_SIZE) 1403 return -EINVAL; 1404 1405 return context_apply_all(ctx, 1406 __apply_ringsize, 1407 __intel_context_ring_size(args->value)); 1408 } 1409 1410 static int __get_ringsize(struct intel_context *ce, void *arg) 1411 { 1412 long sz; 1413 1414 sz = intel_context_get_ring_size(ce); 1415 GEM_BUG_ON(sz > INT_MAX); 1416 1417 return sz; /* stop on first engine */ 1418 } 1419 1420 static int get_ringsize(struct i915_gem_context *ctx, 1421 struct drm_i915_gem_context_param *args) 1422 { 1423 int sz; 1424 1425 if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915)) 1426 return -ENODEV; 1427 1428 if (args->size) 1429 return -EINVAL; 1430 1431 sz = context_apply_all(ctx, __get_ringsize, NULL); 1432 if (sz < 0) 1433 return sz; 1434 1435 args->value = sz; 1436 return 0; 1437 } 1438 1439 int 1440 i915_gem_user_to_context_sseu(struct intel_gt *gt, 1441 const struct drm_i915_gem_context_param_sseu *user, 1442 struct intel_sseu *context) 1443 { 1444 const struct sseu_dev_info *device = >->info.sseu; 1445 struct drm_i915_private *i915 = gt->i915; 1446 1447 /* No zeros in any field. */ 1448 if (!user->slice_mask || !user->subslice_mask || 1449 !user->min_eus_per_subslice || !user->max_eus_per_subslice) 1450 return -EINVAL; 1451 1452 /* Max > min. */ 1453 if (user->max_eus_per_subslice < user->min_eus_per_subslice) 1454 return -EINVAL; 1455 1456 /* 1457 * Some future proofing on the types since the uAPI is wider than the 1458 * current internal implementation. 1459 */ 1460 if (overflows_type(user->slice_mask, context->slice_mask) || 1461 overflows_type(user->subslice_mask, context->subslice_mask) || 1462 overflows_type(user->min_eus_per_subslice, 1463 context->min_eus_per_subslice) || 1464 overflows_type(user->max_eus_per_subslice, 1465 context->max_eus_per_subslice)) 1466 return -EINVAL; 1467 1468 /* Check validity against hardware. */ 1469 if (user->slice_mask & ~device->slice_mask) 1470 return -EINVAL; 1471 1472 if (user->subslice_mask & ~device->subslice_mask[0]) 1473 return -EINVAL; 1474 1475 if (user->max_eus_per_subslice > device->max_eus_per_subslice) 1476 return -EINVAL; 1477 1478 context->slice_mask = user->slice_mask; 1479 context->subslice_mask = user->subslice_mask; 1480 context->min_eus_per_subslice = user->min_eus_per_subslice; 1481 context->max_eus_per_subslice = user->max_eus_per_subslice; 1482 1483 /* Part specific restrictions. 
*/ 1484 if (IS_GEN(i915, 11)) { 1485 unsigned int hw_s = hweight8(device->slice_mask); 1486 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); 1487 unsigned int req_s = hweight8(context->slice_mask); 1488 unsigned int req_ss = hweight8(context->subslice_mask); 1489 1490 /* 1491 * Only full subslice enablement is possible if more than one 1492 * slice is turned on. 1493 */ 1494 if (req_s > 1 && req_ss != hw_ss_per_s) 1495 return -EINVAL; 1496 1497 /* 1498 * If more than four (SScount bitfield limit) subslices are 1499 * requested then the number has to be even. 1500 */ 1501 if (req_ss > 4 && (req_ss & 1)) 1502 return -EINVAL; 1503 1504 /* 1505 * If only one slice is enabled and subslice count is below the 1506 * device full enablement, it must be at most half of the all 1507 * available subslices. 1508 */ 1509 if (req_s == 1 && req_ss < hw_ss_per_s && 1510 req_ss > (hw_ss_per_s / 2)) 1511 return -EINVAL; 1512 1513 /* ABI restriction - VME use case only. */ 1514 1515 /* All slices or one slice only. */ 1516 if (req_s != 1 && req_s != hw_s) 1517 return -EINVAL; 1518 1519 /* 1520 * Half subslices or full enablement only when one slice is 1521 * enabled. 1522 */ 1523 if (req_s == 1 && 1524 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) 1525 return -EINVAL; 1526 1527 /* No EU configuration changes. */ 1528 if ((user->min_eus_per_subslice != 1529 device->max_eus_per_subslice) || 1530 (user->max_eus_per_subslice != 1531 device->max_eus_per_subslice)) 1532 return -EINVAL; 1533 } 1534 1535 return 0; 1536 } 1537 1538 static int set_sseu(struct i915_gem_context *ctx, 1539 struct drm_i915_gem_context_param *args) 1540 { 1541 struct drm_i915_private *i915 = ctx->i915; 1542 struct drm_i915_gem_context_param_sseu user_sseu; 1543 struct intel_context *ce; 1544 struct intel_sseu sseu; 1545 unsigned long lookup; 1546 int ret; 1547 1548 if (args->size < sizeof(user_sseu)) 1549 return -EINVAL; 1550 1551 if (!IS_GEN(i915, 11)) 1552 return -ENODEV; 1553 1554 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 1555 sizeof(user_sseu))) 1556 return -EFAULT; 1557 1558 if (user_sseu.rsvd) 1559 return -EINVAL; 1560 1561 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 1562 return -EINVAL; 1563 1564 lookup = 0; 1565 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 1566 lookup |= LOOKUP_USER_INDEX; 1567 1568 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 1569 if (IS_ERR(ce)) 1570 return PTR_ERR(ce); 1571 1572 /* Only render engine supports RPCS configuration. 
*/ 1573 if (ce->engine->class != RENDER_CLASS) { 1574 ret = -ENODEV; 1575 goto out_ce; 1576 } 1577 1578 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu); 1579 if (ret) 1580 goto out_ce; 1581 1582 ret = intel_context_reconfigure_sseu(ce, sseu); 1583 if (ret) 1584 goto out_ce; 1585 1586 args->size = sizeof(user_sseu); 1587 1588 out_ce: 1589 intel_context_put(ce); 1590 return ret; 1591 } 1592 1593 struct set_engines { 1594 struct i915_gem_context *ctx; 1595 struct i915_gem_engines *engines; 1596 }; 1597 1598 static int 1599 set_engines__load_balance(struct i915_user_extension __user *base, void *data) 1600 { 1601 struct i915_context_engines_load_balance __user *ext = 1602 container_of_user(base, typeof(*ext), base); 1603 const struct set_engines *set = data; 1604 struct drm_i915_private *i915 = set->ctx->i915; 1605 struct intel_engine_cs *stack[16]; 1606 struct intel_engine_cs **siblings; 1607 struct intel_context *ce; 1608 u16 num_siblings, idx; 1609 unsigned int n; 1610 int err; 1611 1612 if (!HAS_EXECLISTS(i915)) 1613 return -ENODEV; 1614 1615 if (intel_uc_uses_guc_submission(&i915->gt.uc)) 1616 return -ENODEV; /* not implement yet */ 1617 1618 if (get_user(idx, &ext->engine_index)) 1619 return -EFAULT; 1620 1621 if (idx >= set->engines->num_engines) { 1622 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", 1623 idx, set->engines->num_engines); 1624 return -EINVAL; 1625 } 1626 1627 idx = array_index_nospec(idx, set->engines->num_engines); 1628 if (set->engines->engines[idx]) { 1629 drm_dbg(&i915->drm, 1630 "Invalid placement[%d], already occupied\n", idx); 1631 return -EEXIST; 1632 } 1633 1634 if (get_user(num_siblings, &ext->num_siblings)) 1635 return -EFAULT; 1636 1637 err = check_user_mbz(&ext->flags); 1638 if (err) 1639 return err; 1640 1641 err = check_user_mbz(&ext->mbz64); 1642 if (err) 1643 return err; 1644 1645 siblings = stack; 1646 if (num_siblings > ARRAY_SIZE(stack)) { 1647 siblings = kmalloc_array(num_siblings, 1648 sizeof(*siblings), 1649 GFP_KERNEL); 1650 if (!siblings) 1651 return -ENOMEM; 1652 } 1653 1654 for (n = 0; n < num_siblings; n++) { 1655 struct i915_engine_class_instance ci; 1656 1657 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { 1658 err = -EFAULT; 1659 goto out_siblings; 1660 } 1661 1662 siblings[n] = intel_engine_lookup_user(i915, 1663 ci.engine_class, 1664 ci.engine_instance); 1665 if (!siblings[n]) { 1666 drm_dbg(&i915->drm, 1667 "Invalid sibling[%d]: { class:%d, inst:%d }\n", 1668 n, ci.engine_class, ci.engine_instance); 1669 err = -EINVAL; 1670 goto out_siblings; 1671 } 1672 } 1673 1674 ce = intel_execlists_create_virtual(siblings, n); 1675 if (IS_ERR(ce)) { 1676 err = PTR_ERR(ce); 1677 goto out_siblings; 1678 } 1679 1680 intel_context_set_gem(ce, set->ctx); 1681 1682 if (cmpxchg(&set->engines->engines[idx], NULL, ce)) { 1683 intel_context_put(ce); 1684 err = -EEXIST; 1685 goto out_siblings; 1686 } 1687 1688 out_siblings: 1689 if (siblings != stack) 1690 kfree(siblings); 1691 1692 return err; 1693 } 1694 1695 static int 1696 set_engines__bond(struct i915_user_extension __user *base, void *data) 1697 { 1698 struct i915_context_engines_bond __user *ext = 1699 container_of_user(base, typeof(*ext), base); 1700 const struct set_engines *set = data; 1701 struct drm_i915_private *i915 = set->ctx->i915; 1702 struct i915_engine_class_instance ci; 1703 struct intel_engine_cs *virtual; 1704 struct intel_engine_cs *master; 1705 u16 idx, num_bonds; 1706 int err, n; 1707 1708 if (get_user(idx, &ext->virtual_index)) 1709 return 
-EFAULT; 1710 1711 if (idx >= set->engines->num_engines) { 1712 drm_dbg(&i915->drm, 1713 "Invalid index for virtual engine: %d >= %d\n", 1714 idx, set->engines->num_engines); 1715 return -EINVAL; 1716 } 1717 1718 idx = array_index_nospec(idx, set->engines->num_engines); 1719 if (!set->engines->engines[idx]) { 1720 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx); 1721 return -EINVAL; 1722 } 1723 virtual = set->engines->engines[idx]->engine; 1724 1725 err = check_user_mbz(&ext->flags); 1726 if (err) 1727 return err; 1728 1729 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { 1730 err = check_user_mbz(&ext->mbz64[n]); 1731 if (err) 1732 return err; 1733 } 1734 1735 if (copy_from_user(&ci, &ext->master, sizeof(ci))) 1736 return -EFAULT; 1737 1738 master = intel_engine_lookup_user(i915, 1739 ci.engine_class, ci.engine_instance); 1740 if (!master) { 1741 drm_dbg(&i915->drm, 1742 "Unrecognised master engine: { class:%u, instance:%u }\n", 1743 ci.engine_class, ci.engine_instance); 1744 return -EINVAL; 1745 } 1746 1747 if (get_user(num_bonds, &ext->num_bonds)) 1748 return -EFAULT; 1749 1750 for (n = 0; n < num_bonds; n++) { 1751 struct intel_engine_cs *bond; 1752 1753 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) 1754 return -EFAULT; 1755 1756 bond = intel_engine_lookup_user(i915, 1757 ci.engine_class, 1758 ci.engine_instance); 1759 if (!bond) { 1760 drm_dbg(&i915->drm, 1761 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", 1762 n, ci.engine_class, ci.engine_instance); 1763 return -EINVAL; 1764 } 1765 1766 /* 1767 * A non-virtual engine has no siblings to choose between; and 1768 * a submit fence will always be directed to the one engine. 1769 */ 1770 if (intel_engine_is_virtual(virtual)) { 1771 err = intel_virtual_engine_attach_bond(virtual, 1772 master, 1773 bond); 1774 if (err) 1775 return err; 1776 } 1777 } 1778 1779 return 0; 1780 } 1781 1782 static const i915_user_extension_fn set_engines__extensions[] = { 1783 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance, 1784 [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond, 1785 }; 1786 1787 static int 1788 set_engines(struct i915_gem_context *ctx, 1789 const struct drm_i915_gem_context_param *args) 1790 { 1791 struct drm_i915_private *i915 = ctx->i915; 1792 struct i915_context_param_engines __user *user = 1793 u64_to_user_ptr(args->value); 1794 struct set_engines set = { .ctx = ctx }; 1795 unsigned int num_engines, n; 1796 u64 extensions; 1797 int err; 1798 1799 if (!args->size) { /* switch back to legacy user_ring_map */ 1800 if (!i915_gem_context_user_engines(ctx)) 1801 return 0; 1802 1803 set.engines = default_engines(ctx); 1804 if (IS_ERR(set.engines)) 1805 return PTR_ERR(set.engines); 1806 1807 goto replace; 1808 } 1809 1810 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines))); 1811 if (args->size < sizeof(*user) || 1812 !IS_ALIGNED(args->size, sizeof(*user->engines))) { 1813 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n", 1814 args->size); 1815 return -EINVAL; 1816 } 1817 1818 /* 1819 * Note that I915_EXEC_RING_MASK limits execbuf to only using the 1820 * first 64 engines defined here. 
1821 */ 1822 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); 1823 set.engines = alloc_engines(num_engines); 1824 if (!set.engines) 1825 return -ENOMEM; 1826 1827 for (n = 0; n < num_engines; n++) { 1828 struct i915_engine_class_instance ci; 1829 struct intel_engine_cs *engine; 1830 struct intel_context *ce; 1831 1832 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { 1833 __free_engines(set.engines, n); 1834 return -EFAULT; 1835 } 1836 1837 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && 1838 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) { 1839 set.engines->engines[n] = NULL; 1840 continue; 1841 } 1842 1843 engine = intel_engine_lookup_user(ctx->i915, 1844 ci.engine_class, 1845 ci.engine_instance); 1846 if (!engine) { 1847 drm_dbg(&i915->drm, 1848 "Invalid engine[%d]: { class:%d, instance:%d }\n", 1849 n, ci.engine_class, ci.engine_instance); 1850 __free_engines(set.engines, n); 1851 return -ENOENT; 1852 } 1853 1854 ce = intel_context_create(engine); 1855 if (IS_ERR(ce)) { 1856 __free_engines(set.engines, n); 1857 return PTR_ERR(ce); 1858 } 1859 1860 intel_context_set_gem(ce, ctx); 1861 1862 set.engines->engines[n] = ce; 1863 } 1864 set.engines->num_engines = num_engines; 1865 1866 err = -EFAULT; 1867 if (!get_user(extensions, &user->extensions)) 1868 err = i915_user_extensions(u64_to_user_ptr(extensions), 1869 set_engines__extensions, 1870 ARRAY_SIZE(set_engines__extensions), 1871 &set); 1872 if (err) { 1873 free_engines(set.engines); 1874 return err; 1875 } 1876 1877 replace: 1878 mutex_lock(&ctx->engines_mutex); 1879 if (i915_gem_context_is_closed(ctx)) { 1880 mutex_unlock(&ctx->engines_mutex); 1881 free_engines(set.engines); 1882 return -ENOENT; 1883 } 1884 if (args->size) 1885 i915_gem_context_set_user_engines(ctx); 1886 else 1887 i915_gem_context_clear_user_engines(ctx); 1888 set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1); 1889 mutex_unlock(&ctx->engines_mutex); 1890 1891 /* Keep track of old engine sets for kill_context() */ 1892 engines_idle_release(ctx, set.engines); 1893 1894 return 0; 1895 } 1896 1897 static struct i915_gem_engines * 1898 __copy_engines(struct i915_gem_engines *e) 1899 { 1900 struct i915_gem_engines *copy; 1901 unsigned int n; 1902 1903 copy = alloc_engines(e->num_engines); 1904 if (!copy) 1905 return ERR_PTR(-ENOMEM); 1906 1907 for (n = 0; n < e->num_engines; n++) { 1908 if (e->engines[n]) 1909 copy->engines[n] = intel_context_get(e->engines[n]); 1910 else 1911 copy->engines[n] = NULL; 1912 } 1913 copy->num_engines = n; 1914 1915 return copy; 1916 } 1917 1918 static int 1919 get_engines(struct i915_gem_context *ctx, 1920 struct drm_i915_gem_context_param *args) 1921 { 1922 struct i915_context_param_engines __user *user; 1923 struct i915_gem_engines *e; 1924 size_t n, count, size; 1925 int err = 0; 1926 1927 err = mutex_lock_interruptible(&ctx->engines_mutex); 1928 if (err) 1929 return err; 1930 1931 e = NULL; 1932 if (i915_gem_context_user_engines(ctx)) 1933 e = __copy_engines(i915_gem_context_engines(ctx)); 1934 mutex_unlock(&ctx->engines_mutex); 1935 if (IS_ERR_OR_NULL(e)) { 1936 args->size = 0; 1937 return PTR_ERR_OR_ZERO(e); 1938 } 1939 1940 count = e->num_engines; 1941 1942 /* Be paranoid in case we have an impedance mismatch */ 1943 if (!check_struct_size(user, engines, count, &size)) { 1944 err = -EINVAL; 1945 goto err_free; 1946 } 1947 if (overflows_type(size, args->size)) { 1948 err = -EINVAL; 1949 goto err_free; 1950 } 1951 1952 if (!args->size) { 1953 args->size = size; 1954 goto 
err_free; 1955 } 1956 1957 if (args->size < size) { 1958 err = -EINVAL; 1959 goto err_free; 1960 } 1961 1962 user = u64_to_user_ptr(args->value); 1963 if (put_user(0, &user->extensions)) { 1964 err = -EFAULT; 1965 goto err_free; 1966 } 1967 1968 for (n = 0; n < count; n++) { 1969 struct i915_engine_class_instance ci = { 1970 .engine_class = I915_ENGINE_CLASS_INVALID, 1971 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE, 1972 }; 1973 1974 if (e->engines[n]) { 1975 ci.engine_class = e->engines[n]->engine->uabi_class; 1976 ci.engine_instance = e->engines[n]->engine->uabi_instance; 1977 } 1978 1979 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { 1980 err = -EFAULT; 1981 goto err_free; 1982 } 1983 } 1984 1985 args->size = size; 1986 1987 err_free: 1988 free_engines(e); 1989 return err; 1990 } 1991 1992 static int 1993 set_persistence(struct i915_gem_context *ctx, 1994 const struct drm_i915_gem_context_param *args) 1995 { 1996 if (args->size) 1997 return -EINVAL; 1998 1999 return __context_set_persistence(ctx, args->value); 2000 } 2001 2002 static int __apply_priority(struct intel_context *ce, void *arg) 2003 { 2004 struct i915_gem_context *ctx = arg; 2005 2006 if (!intel_engine_has_timeslices(ce->engine)) 2007 return 0; 2008 2009 if (ctx->sched.priority >= I915_PRIORITY_NORMAL) 2010 intel_context_set_use_semaphores(ce); 2011 else 2012 intel_context_clear_use_semaphores(ce); 2013 2014 return 0; 2015 } 2016 2017 static int set_priority(struct i915_gem_context *ctx, 2018 const struct drm_i915_gem_context_param *args) 2019 { 2020 s64 priority = args->value; 2021 2022 if (args->size) 2023 return -EINVAL; 2024 2025 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) 2026 return -ENODEV; 2027 2028 if (priority > I915_CONTEXT_MAX_USER_PRIORITY || 2029 priority < I915_CONTEXT_MIN_USER_PRIORITY) 2030 return -EINVAL; 2031 2032 if (priority > I915_CONTEXT_DEFAULT_PRIORITY && 2033 !capable(CAP_SYS_NICE)) 2034 return -EPERM; 2035 2036 ctx->sched.priority = I915_USER_PRIORITY(priority); 2037 context_apply_all(ctx, __apply_priority, ctx); 2038 2039 return 0; 2040 } 2041 2042 static int ctx_setparam(struct drm_i915_file_private *fpriv, 2043 struct i915_gem_context *ctx, 2044 struct drm_i915_gem_context_param *args) 2045 { 2046 int ret = 0; 2047 2048 switch (args->param) { 2049 case I915_CONTEXT_PARAM_NO_ZEROMAP: 2050 if (args->size) 2051 ret = -EINVAL; 2052 else if (args->value) 2053 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 2054 else 2055 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 2056 break; 2057 2058 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 2059 if (args->size) 2060 ret = -EINVAL; 2061 else if (args->value) 2062 i915_gem_context_set_no_error_capture(ctx); 2063 else 2064 i915_gem_context_clear_no_error_capture(ctx); 2065 break; 2066 2067 case I915_CONTEXT_PARAM_BANNABLE: 2068 if (args->size) 2069 ret = -EINVAL; 2070 else if (!capable(CAP_SYS_ADMIN) && !args->value) 2071 ret = -EPERM; 2072 else if (args->value) 2073 i915_gem_context_set_bannable(ctx); 2074 else 2075 i915_gem_context_clear_bannable(ctx); 2076 break; 2077 2078 case I915_CONTEXT_PARAM_RECOVERABLE: 2079 if (args->size) 2080 ret = -EINVAL; 2081 else if (args->value) 2082 i915_gem_context_set_recoverable(ctx); 2083 else 2084 i915_gem_context_clear_recoverable(ctx); 2085 break; 2086 2087 case I915_CONTEXT_PARAM_PRIORITY: 2088 ret = set_priority(ctx, args); 2089 break; 2090 2091 case I915_CONTEXT_PARAM_SSEU: 2092 ret = set_sseu(ctx, args); 2093 break; 2094 2095 case I915_CONTEXT_PARAM_VM: 2096 ret = set_ppgtt(fpriv, 
struct create_ext {
	struct i915_gem_context *ctx;
	struct drm_i915_file_private *fpriv;
};

static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	if (local.param.ctx_id)
		return -EINVAL;

	return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}

static int copy_ring_size(struct intel_context *dst,
			  struct intel_context *src)
{
	long sz;

	sz = intel_context_get_ring_size(src);
	if (sz < 0)
		return sz;

	return intel_context_set_ring_size(dst, sz);
}
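/*
 * Duplicate the engine map of @src into @dst for the CLONE_ENGINES create
 * extension. Physical engines get a fresh intel_context, virtual engines
 * are cloned (they cannot be shared between contexts), and the preferred
 * ring size is carried over before the new map replaces the default one
 * on @dst.
 */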
static int clone_engines(struct i915_gem_context *dst,
			 struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	bool user_engines;
	unsigned long n;

	clone = alloc_engines(e->num_engines);
	if (!clone)
		goto err_unlock;

	for (n = 0; n < e->num_engines; n++) {
		struct intel_engine_cs *engine;

		if (!e->engines[n]) {
			clone->engines[n] = NULL;
			continue;
		}
		engine = e->engines[n]->engine;

		/*
		 * Virtual engines are singletons; they can only exist
		 * inside a single context, because they embed their
		 * HW context... As each virtual context implies a single
		 * timeline (each engine can only dequeue a single request
		 * at any time), it would be surprising for two contexts
		 * to use the same engine. So let's create a copy of
		 * the virtual engine instead.
		 */
		if (intel_engine_is_virtual(engine))
			clone->engines[n] =
				intel_execlists_clone_virtual(engine);
		else
			clone->engines[n] = intel_context_create(engine);
		if (IS_ERR_OR_NULL(clone->engines[n])) {
			__free_engines(clone, n);
			goto err_unlock;
		}

		intel_context_set_gem(clone->engines[n], dst);

		/* Copy across the preferred ringsize */
		if (copy_ring_size(clone->engines[n], e->engines[n])) {
			__free_engines(clone, n + 1);
			goto err_unlock;
		}
	}
	clone->num_engines = n;

	user_engines = i915_gem_context_user_engines(src);
	i915_gem_context_unlock_engines(src);

	/* Serialised by constructor */
	engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
	if (user_engines)
		i915_gem_context_set_user_engines(dst);
	else
		i915_gem_context_clear_user_engines(dst);
	return 0;

err_unlock:
	i915_gem_context_unlock_engines(src);
	return -ENOMEM;
}

static int clone_flags(struct i915_gem_context *dst,
		       struct i915_gem_context *src)
{
	dst->user_flags = src->user_flags;
	return 0;
}

static int clone_schedattr(struct i915_gem_context *dst,
			   struct i915_gem_context *src)
{
	dst->sched = src->sched;
	return 0;
}
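/*
 * Copy the per-engine SSEU configuration from @src to @dst. The two
 * contexts must have matching engine maps (same slots, same classes);
 * each source engine is locked while pinned to serialise against a
 * concurrent set_sseu().
 */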
static int clone_sseu(struct i915_gem_context *dst,
		      struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	unsigned long n;
	int err;

	/* no locking required; sole access under constructor */
	clone = __context_engines_static(dst);
	if (e->num_engines != clone->num_engines) {
		err = -EINVAL;
		goto unlock;
	}

	for (n = 0; n < e->num_engines; n++) {
		struct intel_context *ce = e->engines[n];

		if (clone->engines[n]->engine->class != ce->engine->class) {
			/* Must have compatible engine maps! */
			err = -EINVAL;
			goto unlock;
		}

		/* serialises with set_sseu */
		err = intel_context_lock_pinned(ce);
		if (err)
			goto unlock;

		clone->engines[n]->sseu = ce->sseu;
		intel_context_unlock_pinned(ce);
	}

	err = 0;
unlock:
	i915_gem_context_unlock_engines(src);
	return err;
}

static int clone_timeline(struct i915_gem_context *dst,
			  struct i915_gem_context *src)
{
	if (src->timeline)
		__assign_timeline(dst, src->timeline);

	return 0;
}

static int clone_vm(struct i915_gem_context *dst,
		    struct i915_gem_context *src)
{
	struct i915_address_space *vm;
	int err = 0;

	if (!rcu_access_pointer(src->vm))
		return 0;

	rcu_read_lock();
	vm = context_get_vm_rcu(src);
	rcu_read_unlock();

	if (!mutex_lock_interruptible(&dst->mutex)) {
		__assign_ppgtt(dst, vm);
		mutex_unlock(&dst->mutex);
	} else {
		err = -EINTR;
	}

	i915_vm_put(vm);
	return err;
}

static int create_clone(struct i915_user_extension __user *ext, void *data)
{
	static int (* const fn[])(struct i915_gem_context *dst,
				  struct i915_gem_context *src) = {
#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
		MAP(ENGINES, clone_engines),
		MAP(FLAGS, clone_flags),
		MAP(SCHEDATTR, clone_schedattr),
		MAP(SSEU, clone_sseu),
		MAP(TIMELINE, clone_timeline),
		MAP(VM, clone_vm),
#undef MAP
	};
	struct drm_i915_gem_context_create_ext_clone local;
	const struct create_ext *arg = data;
	struct i915_gem_context *dst = arg->ctx;
	struct i915_gem_context *src;
	int err, bit;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
		     I915_CONTEXT_CLONE_UNKNOWN);

	if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
		return -EINVAL;

	if (local.rsvd)
		return -EINVAL;

	rcu_read_lock();
	src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
	rcu_read_unlock();
	if (!src)
		return -ENOENT;

	GEM_BUG_ON(src == dst);

	for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
		if (!(local.flags & BIT(bit)))
			continue;

		err = fn[bit](dst, src);
		if (err)
			return err;
	}

	return 0;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}
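/*
 * ioctl handler for DRM_I915_GEM_CONTEXT_CREATE[_EXT]: create a new GEM
 * context for this client. Creation is refused if the hardware has no
 * logical context support, the GT is terminally wedged, or the client has
 * accumulated enough hang score to be banned. Any CREATE extensions are
 * applied before the context is registered and its id returned.
 */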
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.ctx = i915_gem_create_context(i915, args->flags);
	if (IS_ERR(ext_data.ctx))
		return PTR_ERR(ext_data.ctx);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = id;
	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);

	return 0;

err_ctx:
	context_close(ext_data.ctx);
	return ret;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	context_close(ctx);
	return 0;
}

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}
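/*
 * ioctl handler for DRM_I915_GEM_CONTEXT_GETPARAM: read back a single
 * context parameter. Simple flags are returned inline in args->value;
 * larger parameters (SSEU, VM, engines, ringsize) are handled by their
 * dedicated getters.
 */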
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->size = 0;
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		rcu_read_lock();
		if (rcu_access_pointer(ctx->vm))
			args->value = rcu_dereference(ctx->vm)->total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		rcu_read_unlock();
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = get_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_RINGSIZE:
		ret = get_ringsize(ctx, args);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = ctx_setparam(file_priv, ctx, args);

	i915_gem_context_put(ctx);
	return ret;
}
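/*
 * ioctl handler for DRM_I915_GET_RESET_STATS: report hang statistics for a
 * context. batch_active is taken from the context's guilty_count and
 * batch_pending from its active_count; the global reset count is only
 * reported to CAP_SYS_ADMIN, all other clients see 0.
 */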
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	if (unlikely(!e))
		return NULL;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

static void i915_global_gem_context_shrink(void)
{
	kmem_cache_shrink(global.slab_luts);
}

static void i915_global_gem_context_exit(void)
{
	kmem_cache_destroy(global.slab_luts);
}

static struct i915_global_gem_context global = { {
	.shrink = i915_global_gem_context_shrink,
	.exit = i915_global_gem_context_exit,
} };

int __init i915_global_gem_context_init(void)
{
	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!global.slab_luts)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}