/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU
 * enters and exits RC6 (the GPU has its own internal power context, except on
 * gen5). Though something like a context does exist for the media ring, the
 * code only supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current context, to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context, but it is still active.
 *
 */
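
/*
 * Illustrative sketch (not driver code; helper names are hypothetical): the
 * state machine above, expressed as the operations that drive it. "pin" and
 * "active" stand for the context image being pinned for execution and being
 * tracked by unretired requests, respectively.
 *
 *	ctx = create();			// S0 -> S1: refcount = 1
 *	submit(ctx);			// S1 -> S2: +ref, +pin while current
 *	submit(other_ctx);		// S2 -> S3: -pin, still active on GPU
 *	retire(ctx requests);		// S3 -> S1: -ref, no longer active
 *	destroy(ctx);			// S2/S3 -> S4/S5, then S0 once idle
 */
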
#include <linux/log2.h>
#include <linux/nospec.h>

#include <drm/i915_drm.h>

#include "gt/intel_lrc_reg.h"
#include "gt/intel_engine_user.h"

#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

static struct i915_global_gem_context {
	struct i915_global base;
	struct kmem_cache *slab_luts;
} global;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	return kmem_cache_free(global.slab_luts, lut);
}

static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	lockdep_assert_held(&ctx->mutex);

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		rcu_read_unlock();
		i915_gem_object_lock(obj);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		i915_gem_object_unlock(obj);
		rcu_read_lock();

		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			if (atomic_dec_and_test(&vma->open_count) &&
			    !i915_vma_is_ggtt(vma))
				i915_vma_close(vma);
			i915_gem_object_put(obj);
		}

		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
}

static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}

static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		if (!e->engines[count])
			continue;

		intel_context_put(e->engines[count]);
	}
	kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}

static void free_engines_rcu(struct rcu_head *rcu)
{
	free_engines(container_of(rcu, struct i915_gem_engines, rcu));
}

static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
	const struct intel_gt *gt = &ctx->i915->gt;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e;
	enum intel_engine_id id;

	e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
	if (!e)
		return ERR_PTR(-ENOMEM);

	init_rcu_head(&e->rcu);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(ctx, engine);
		if (IS_ERR(ce)) {
			__free_engines(e, id);
			return ERR_CAST(ce);
		}

		e->engines[id] = ce;
		e->num_engines = id + 1;
	}

	return e;
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	free_engines(rcu_access_pointer(ctx->engines));
	mutex_destroy(&ctx->engines_mutex);

	if (ctx->timeline)
		intel_timeline_put(ctx->timeline);

	kfree(ctx->name);
	put_pid(ctx->pid);

	mutex_destroy(&ctx->mutex);

	kfree_rcu(ctx, rcu);
}

static void contexts_free_all(struct llist_node *list)
{
	struct i915_gem_context *ctx, *cn;

	llist_for_each_entry_safe(ctx, cn, list, free_link)
		i915_gem_context_free(ctx);
}

static void contexts_flush_free(struct i915_gem_contexts *gc)
{
	contexts_free_all(llist_del_all(&gc->free_list));
}

static void contexts_free_worker(struct work_struct *work)
{
	struct i915_gem_contexts *gc =
		container_of(work, typeof(*gc), free_work);

	contexts_flush_free(gc);
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;

	trace_i915_context_free(ctx);
	if (llist_add(&ctx->free_link, &gc->free_list))
		schedule_work(&gc->free_work);
}

static void context_close(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	i915_gem_context_set_closed(ctx);

	mutex_lock(&ctx->mutex);

	vm = i915_gem_context_vm(ctx);
	if (vm)
		i915_vm_close(vm);

	ctx->file_priv = ERR_PTR(-EBADF);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);

	mutex_unlock(&ctx->mutex);
	i915_gem_context_put(ctx);
}

static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	int err;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->i915 = i915;
	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
	mutex_init(&ctx->mutex);

	mutex_init(&ctx->engines_mutex);
	e = default_engines(ctx);
	if (IS_ERR(e)) {
		err = PTR_ERR(e);
		goto err_free;
	}
	RCU_INIT_POINTER(ctx->engines, e);

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(i915);

	i915_gem_context_set_bannable(ctx);
	i915_gem_context_set_recoverable(ctx);

	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

	spin_lock(&i915->gem.contexts.lock);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	spin_unlock(&i915->gem.contexts.lock);

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(err);
}

static void
context_apply_all(struct i915_gem_context *ctx,
		  void (*fn)(struct intel_context *ce, void *data),
		  void *data)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
		fn(ce, data);
	i915_gem_context_unlock_engines(ctx);
}

static void __apply_ppgtt(struct intel_context *ce, void *vm)
{
	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(vm);
}

static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
	struct i915_address_space *old = i915_gem_context_vm(ctx);

	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));

	rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
	context_apply_all(ctx, __apply_ppgtt, vm);

	return old;
}

static void __assign_ppgtt(struct i915_gem_context *ctx,
			   struct i915_address_space *vm)
{
	if (vm == rcu_access_pointer(ctx->vm))
		return;

	vm = __set_ppgtt(ctx, vm);
	if (vm)
		i915_vm_close(vm);
}

static void __set_timeline(struct intel_timeline **dst,
			   struct intel_timeline *src)
{
	struct intel_timeline *old = *dst;

	*dst = src ? intel_timeline_get(src) : NULL;

	if (old)
		intel_timeline_put(old);
}

static void __apply_timeline(struct intel_context *ce, void *timeline)
{
	__set_timeline(&ce->timeline, timeline);
}

static void __assign_timeline(struct i915_gem_context *ctx,
			      struct intel_timeline *timeline)
{
	__set_timeline(&ctx->timeline, timeline);
	context_apply_all(ctx, __apply_timeline, timeline);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
{
	struct i915_gem_context *ctx;

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
	    !HAS_EXECLISTS(i915))
		return ERR_PTR(-EINVAL);

	/* Reap the stale contexts */
	contexts_flush_free(&i915->gem.contexts);

	ctx = __create_context(i915);
	if (IS_ERR(ctx))
		return ctx;

	if (HAS_FULL_PPGTT(i915)) {
		struct i915_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(i915);
		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			context_close(ctx);
			return ERR_CAST(ppgtt);
		}

		mutex_lock(&ctx->mutex);
		__assign_ppgtt(ctx, &ppgtt->vm);
		mutex_unlock(&ctx->mutex);

		i915_vm_put(&ppgtt->vm);
	}

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
		struct intel_timeline *timeline;

		timeline = intel_timeline_create(&i915->gt, NULL);
		if (IS_ERR(timeline)) {
			context_close(ctx);
			return ERR_CAST(timeline);
		}

		__assign_timeline(ctx, timeline);
		intel_timeline_put(timeline);
	}

	trace_i915_context_create(ctx);

	return ctx;
}

static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
	struct i915_gem_context *ctx;

	/* Keep the context ref so that we can free it immediately ourselves */
	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	context_close(ctx);
	i915_gem_context_free(ctx);
}

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_create_context(i915, 0);
	if (IS_ERR(ctx))
		return ctx;

	i915_gem_context_clear_bannable(ctx);
	ctx->sched.priority = I915_USER_PRIORITY(prio);

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	return ctx;
}

static void init_contexts(struct i915_gem_contexts *gc)
{
	spin_lock_init(&gc->lock);
	INIT_LIST_HEAD(&gc->list);

	INIT_WORK(&gc->free_work, contexts_free_worker);
	init_llist_head(&gc->free_list);
}

int i915_gem_init_contexts(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;

	/* Reassure ourselves we are only called once */
	GEM_BUG_ON(i915->kernel_context);

	init_contexts(&i915->gem.contexts);

	/* lowest priority; idle task */
	ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context\n");
		return PTR_ERR(ctx);
	}
	i915->kernel_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 DRIVER_CAPS(i915)->has_logical_contexts ?
			 "logical" : "fake");
	return 0;
}

void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
	destroy_kernel_context(&i915->kernel_context);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
	context_close(p);
	return 0;
}

static int vm_idr_cleanup(int id, void *p, void *data)
{
	i915_vm_put(p);
	return 0;
}

static int gem_context_register(struct i915_gem_context *ctx,
				struct drm_i915_file_private *fpriv)
{
	struct i915_address_space *vm;
	int ret;

	ctx->file_priv = fpriv;

	mutex_lock(&ctx->mutex);
	vm = i915_gem_context_vm(ctx);
	if (vm)
		WRITE_ONCE(vm->file, fpriv); /* XXX */
	mutex_unlock(&ctx->mutex);

	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
			      current->comm, pid_nr(ctx->pid));
	if (!ctx->name) {
		ret = -ENOMEM;
		goto err_pid;
	}

	/* And finally expose ourselves to userspace via the idr */
	mutex_lock(&fpriv->context_idr_lock);
	ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&fpriv->context_idr_lock);
	if (ret >= 0)
		goto out;

	kfree(fetch_and_zero(&ctx->name));
err_pid:
	put_pid(fetch_and_zero(&ctx->pid));
out:
	return ret;
}

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int err;

	mutex_init(&file_priv->context_idr_lock);
	mutex_init(&file_priv->vm_idr_lock);

	idr_init(&file_priv->context_idr);
	idr_init_base(&file_priv->vm_idr, 1);

	ctx = i915_gem_create_context(i915, 0);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err;
	}

	err = gem_context_register(ctx, file_priv);
	if (err < 0)
		goto err_ctx;

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
	GEM_BUG_ON(err > 0);

	return 0;

err_ctx:
	context_close(ctx);
err:
	idr_destroy(&file_priv->vm_idr);
	idr_destroy(&file_priv->context_idr);
	mutex_destroy(&file_priv->vm_idr_lock);
	mutex_destroy(&file_priv->context_idr_lock);
	return err;
}

void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_private *i915 = file_priv->dev_priv;

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
	mutex_destroy(&file_priv->context_idr_lock);

	idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
	idr_destroy(&file_priv->vm_idr);
	mutex_destroy(&file_priv->vm_idr_lock);

	contexts_flush_free(&i915->gem.contexts);
}

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_vm_control *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_ppgtt *ppgtt;
	int err;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	if (args->flags)
		return -EINVAL;

	ppgtt = i915_ppgtt_create(i915);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	ppgtt->vm.file = file_priv;

	if (args->extensions) {
		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   NULL, 0,
					   ppgtt);
		if (err)
			goto err_put;
	}

	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
	if (err)
		goto err_put;

	err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
	if (err < 0)
		goto err_unlock;

	GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */

	mutex_unlock(&file_priv->vm_idr_lock);

	args->vm_id = err;
	return 0;

err_unlock:
	mutex_unlock(&file_priv->vm_idr_lock);
err_put:
	i915_vm_put(&ppgtt->vm);
	return err;
}

int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_vm_control *args = data;
	struct i915_address_space *vm;
	int err;
	u32 id;

	if (args->flags)
		return -EINVAL;

	if (args->extensions)
		return -EINVAL;

	id = args->vm_id;
	if (!id)
		return -ENOENT;

	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
	if (err)
		return err;

	vm = idr_remove(&file_priv->vm_idr, id);

	mutex_unlock(&file_priv->vm_idr_lock);
	if (!vm)
		return -ENOENT;

	i915_vm_put(vm);
	return 0;
}
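
/*
 * Illustrative userspace sketch (not compiled as part of this file; assumes
 * libdrm's drmIoctl() and the uAPI in include/uapi/drm/i915_drm.h): create a
 * VM, attach it to an existing context via I915_CONTEXT_PARAM_VM, and later
 * destroy it. Error handling is trimmed and ctx_id is assumed to exist.
 *
 *	struct drm_i915_gem_vm_control vm = { };
 *	struct drm_i915_gem_context_param p = { };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *
 *	p.ctx_id = ctx_id;
 *	p.param = I915_CONTEXT_PARAM_VM;
 *	p.value = vm.vm_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 *
 *	// ...use the context...
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);
 */
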
struct context_barrier_task {
	struct i915_active base;
	void (*task)(void *data);
	void *data;
};

__i915_active_call
static void cb_retire(struct i915_active *base)
{
	struct context_barrier_task *cb = container_of(base, typeof(*cb), base);

	if (cb->task)
		cb->task(cb->data);

	i915_active_fini(&cb->base);
	kfree(cb);
}

I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
				intel_engine_mask_t engines,
				bool (*skip)(struct intel_context *ce, void *data),
				int (*emit)(struct i915_request *rq, void *data),
				void (*task)(void *data),
				void *data)
{
	struct context_barrier_task *cb;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;
	int err = 0;

	GEM_BUG_ON(!task);

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return -ENOMEM;

	i915_active_init(&cb->base, NULL, cb_retire);
	err = i915_active_acquire(&cb->base);
	if (err) {
		kfree(cb);
		return err;
	}

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		struct i915_request *rq;

		if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
				       ce->engine->mask)) {
			err = -ENXIO;
			break;
		}

		if (!(ce->engine->mask & engines))
			continue;

		if (skip && skip(ce, data))
			continue;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = 0;
		if (emit)
			err = emit(rq, data);
		if (err == 0)
			err = i915_active_add_request(&cb->base, rq);

		i915_request_add(rq);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);

	cb->task = err ? NULL : task; /* caller needs to unwind instead */
	cb->data = data;

	i915_active_release(&cb->base);

	return err;
}

static int get_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm;
	int ret;

	if (!rcu_access_pointer(ctx->vm))
		return -ENODEV;

	rcu_read_lock();
	vm = i915_vm_get(ctx->vm);
	rcu_read_unlock();

	ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
	if (ret)
		goto err_put;

	ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
	GEM_BUG_ON(!ret);
	if (ret < 0)
		goto err_unlock;

	i915_vm_open(vm);

	args->size = 0;
	args->value = ret;

	ret = 0;
err_unlock:
	mutex_unlock(&file_priv->vm_idr_lock);
err_put:
	i915_vm_put(vm);
	return ret;
}

static void set_ppgtt_barrier(void *data)
{
	struct i915_address_space *old = data;

	if (INTEL_GEN(old->i915) < 8)
		gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));

	i915_vm_close(old);
}

static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
	struct i915_address_space *vm = rq->hw_context->vm;
	struct intel_engine_cs *engine = rq->engine;
	u32 base = engine->mmio_base;
	u32 *cs;
	int i;

	if (i915_vm_is_4lvl(vm)) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
		const dma_addr_t pd_daddr = px_dma(ppgtt->pd);

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		*cs++ = MI_LOAD_REGISTER_IMM(2);

		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
		*cs++ = upper_32_bits(pd_daddr);
		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
		*cs++ = lower_32_bits(pd_daddr);

		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);
	} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
		int err;

		/* Magic required to prevent forcewake errors! */
		err = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (err)
			return err;

		cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
		for (i = GEN8_3LVL_PDPES; i--; ) {
			const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
			*cs++ = upper_32_bits(pd_daddr);
			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
			*cs++ = lower_32_bits(pd_daddr);
		}
		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);
	} else {
		/* ppGTT is not part of the legacy context image */
		gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
	}

	return 0;
}

static bool skip_ppgtt_update(struct intel_context *ce, void *data)
{
	if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
		return !ce->state;
	else
		return !atomic_read(&ce->pin_count);
}

static int set_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm, *old;
	int err;

	if (args->size)
		return -EINVAL;

	if (!rcu_access_pointer(ctx->vm))
		return -ENODEV;

	if (upper_32_bits(args->value))
		return -ENOENT;

	rcu_read_lock();
	vm = idr_find(&file_priv->vm_idr, args->value);
	if (vm && !kref_get_unless_zero(&vm->ref))
		vm = NULL;
	rcu_read_unlock();
	if (!vm)
		return -ENOENT;

	err = mutex_lock_interruptible(&ctx->mutex);
	if (err)
		goto out;

	if (i915_gem_context_is_closed(ctx)) {
		err = -ENOENT;
		goto out;
	}

	if (vm == rcu_access_pointer(ctx->vm))
		goto unlock;

	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
	lut_close(ctx);

	old = __set_ppgtt(ctx, vm);

	/*
	 * We need to flush any requests using the current ppgtt before
	 * we release it as the requests do not hold a reference themselves,
	 * only indirectly through the context.
	 */
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_ppgtt_update,
				   emit_ppgtt_update,
				   set_ppgtt_barrier,
				   old);
	if (err) {
		i915_vm_close(__set_ppgtt(ctx, old));
		i915_vm_close(old);
	}

unlock:
	mutex_unlock(&ctx->mutex);
out:
	i915_vm_put(vm);
	return err;
}

static int gen8_emit_rpcs_config(struct i915_request *rq,
				 struct intel_context *ce,
				 struct intel_sseu sseu)
{
	u64 offset;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	offset = i915_ggtt_offset(ce->state) +
		 LRC_STATE_PN * PAGE_SIZE +
		 CTX_R_PWR_CLK_STATE * 4;

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);
	*cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);

	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
{
	struct i915_request *rq;
	int ret;

	lockdep_assert_held(&ce->pin_mutex);

	/*
	 * If the context is not idle, we have to submit an ordered request to
	 * modify its context image via the kernel context (writing to our own
	 * image, or into the registers directly, does not stick). Pristine
	 * and idle contexts will be configured on pinning.
	 */
	if (!intel_context_is_pinned(ce))
		return 0;

	rq = i915_request_create(ce->engine->kernel_context);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Serialise with the remote context */
	ret = intel_context_prepare_remote_request(ce, rq);
	if (ret == 0)
		ret = gen8_emit_rpcs_config(rq, ce, sseu);

	i915_request_add(rq);
	return ret;
}

static int
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
	int ret;

	GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);

	ret = intel_context_lock_pinned(ce);
	if (ret)
		return ret;

	/* Nothing to do if unmodified. */
	if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
		goto unlock;

	ret = gen8_modify_rpcs(ce, sseu);
	if (!ret)
		ce->sseu = sseu;

unlock:
	intel_context_unlock_pinned(ce);
	return ret;
}

static int
user_to_context_sseu(struct drm_i915_private *i915,
		     const struct drm_i915_gem_context_param_sseu *user,
		     struct intel_sseu *context)
{
	const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu;

	/* No zeros in any field. */
	if (!user->slice_mask || !user->subslice_mask ||
	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
		return -EINVAL;

	/* Max > min. */
	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
		return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
	 */
	if (overflows_type(user->slice_mask, context->slice_mask) ||
	    overflows_type(user->subslice_mask, context->subslice_mask) ||
	    overflows_type(user->min_eus_per_subslice,
			   context->min_eus_per_subslice) ||
	    overflows_type(user->max_eus_per_subslice,
			   context->max_eus_per_subslice))
		return -EINVAL;

	/* Check validity against hardware. */
	if (user->slice_mask & ~device->slice_mask)
		return -EINVAL;

	if (user->subslice_mask & ~device->subslice_mask[0])
		return -EINVAL;

	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
		return -EINVAL;

	context->slice_mask = user->slice_mask;
	context->subslice_mask = user->subslice_mask;
	context->min_eus_per_subslice = user->min_eus_per_subslice;
	context->max_eus_per_subslice = user->max_eus_per_subslice;

	/* Part specific restrictions. */
	if (IS_GEN(i915, 11)) {
		unsigned int hw_s = hweight8(device->slice_mask);
		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
		unsigned int req_s = hweight8(context->slice_mask);
		unsigned int req_ss = hweight8(context->subslice_mask);

		/*
		 * Only full subslice enablement is possible if more than one
		 * slice is turned on.
		 */
		if (req_s > 1 && req_ss != hw_ss_per_s)
			return -EINVAL;

		/*
		 * If more than four (SScount bitfield limit) subslices are
		 * requested then the number has to be even.
		 */
		if (req_ss > 4 && (req_ss & 1))
			return -EINVAL;

		/*
		 * If only one slice is enabled and subslice count is below the
		 * device full enablement, it must be at most half of the all
		 * available subslices.
		 */
		if (req_s == 1 && req_ss < hw_ss_per_s &&
		    req_ss > (hw_ss_per_s / 2))
			return -EINVAL;

		/* ABI restriction - VME use case only. */

		/* All slices or one slice only. */
		if (req_s != 1 && req_s != hw_s)
			return -EINVAL;

		/*
		 * Half subslices or full enablement only when one slice is
		 * enabled.
		 */
		if (req_s == 1 &&
		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
			return -EINVAL;

		/* No EU configuration changes. */
		if ((user->min_eus_per_subslice !=
		     device->max_eus_per_subslice) ||
		    (user->max_eus_per_subslice !=
		     device->max_eus_per_subslice))
			return -EINVAL;
	}

	return 0;
}

static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	struct intel_sseu sseu;
	unsigned long lookup;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (!IS_GEN(i915, 11))
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Only render engine supports RPCS configuration. */
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = user_to_context_sseu(i915, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}
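
/*
 * Illustrative userspace sketch (not compiled as part of this file; assumes
 * libdrm): restrict the render engine of a context to a subset of
 * slices/subslices with I915_CONTEXT_PARAM_SSEU. The masks below are
 * placeholders; real values must satisfy the gen11 rules checked in
 * user_to_context_sseu() above.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *		.slice_mask = 0x1,
 *		.subslice_mask = 0xf,
 *		.min_eus_per_subslice = 8,
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */
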
struct set_engines {
	struct i915_gem_context *ctx;
	struct i915_gem_engines *engines;
};

static int
set_engines__load_balance(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct intel_engine_cs *stack[16];
	struct intel_engine_cs **siblings;
	struct intel_context *ce;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(set->ctx->i915))
		return -ENODEV;

	if (USES_GUC_SUBMISSION(set->ctx->i915))
		return -ENODEV; /* not implemented yet */

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		DRM_DEBUG("Invalid placement value, %d >= %d\n",
			  idx, set->engines->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->engines->num_engines);
	if (set->engines->engines[idx]) {
		DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	siblings = stack;
	if (num_siblings > ARRAY_SIZE(stack)) {
		siblings = kmalloc_array(num_siblings,
					 sizeof(*siblings),
					 GFP_KERNEL);
		if (!siblings)
			return -ENOMEM;
	}

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto out_siblings;
		}

		siblings[n] = intel_engine_lookup_user(set->ctx->i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
				  n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto out_siblings;
		}
	}

	ce = intel_execlists_create_virtual(set->ctx, siblings, n);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out_siblings;
	}

	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
		intel_context_put(ce);
		err = -EEXIST;
		goto out_siblings;
	}

out_siblings:
	if (siblings != stack)
		kfree(siblings);

	return err;
}

static int
set_engines__bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *virtual;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
			  idx, set->engines->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->engines->num_engines);
	if (!set->engines->engines[idx]) {
		DRM_DEBUG("Invalid engine at %d\n", idx);
		return -EINVAL;
	}
	virtual = set->engines->engines[idx]->engine;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(set->ctx->i915,
					  ci.engine_class, ci.engine_instance);
	if (!master) {
		DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
			  ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (get_user(num_bonds, &ext->num_bonds))
		return -EFAULT;

	for (n = 0; n < num_bonds; n++) {
		struct intel_engine_cs *bond;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(set->ctx->i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
				  n, ci.engine_class, ci.engine_instance);
			return -EINVAL;
		}

		/*
		 * A non-virtual engine has no siblings to choose between; and
		 * a submit fence will always be directed to the one engine.
		 */
		if (intel_engine_is_virtual(virtual)) {
			err = intel_virtual_engine_attach_bond(virtual,
							       master,
							       bond);
			if (err)
				return err;
		}
	}

	return 0;
}

static const i915_user_extension_fn set_engines__extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
};

static int
set_engines(struct i915_gem_context *ctx,
	    const struct drm_i915_gem_context_param *args)
{
	struct i915_context_param_engines __user *user =
		u64_to_user_ptr(args->value);
	struct set_engines set = { .ctx = ctx };
	unsigned int num_engines, n;
	u64 extensions;
	int err;

	if (!args->size) { /* switch back to legacy user_ring_map */
		if (!i915_gem_context_user_engines(ctx))
			return 0;

		set.engines = default_engines(ctx);
		if (IS_ERR(set.engines))
			return PTR_ERR(set.engines);

		goto replace;
	}

	BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
	if (args->size < sizeof(*user) ||
	    !IS_ALIGNED(args->size, sizeof(*user->engines))) {
		DRM_DEBUG("Invalid size for engine array: %d\n",
			  args->size);
		return -EINVAL;
	}

	/*
	 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
	 * first 64 engines defined here.
	 */
	num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);

	set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
			      GFP_KERNEL);
	if (!set.engines)
		return -ENOMEM;

	init_rcu_head(&set.engines->rcu);
	for (n = 0; n < num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;
		struct intel_context *ce;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			__free_engines(set.engines, n);
			return -EFAULT;
		}

		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
			set.engines->engines[n] = NULL;
			continue;
		}

		engine = intel_engine_lookup_user(ctx->i915,
						  ci.engine_class,
						  ci.engine_instance);
		if (!engine) {
			DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
				  n, ci.engine_class, ci.engine_instance);
			__free_engines(set.engines, n);
			return -ENOENT;
		}

		ce = intel_context_create(ctx, engine);
		if (IS_ERR(ce)) {
			__free_engines(set.engines, n);
			return PTR_ERR(ce);
		}

		set.engines->engines[n] = ce;
	}
	set.engines->num_engines = num_engines;

	err = -EFAULT;
	if (!get_user(extensions, &user->extensions))
		err = i915_user_extensions(u64_to_user_ptr(extensions),
					   set_engines__extensions,
					   ARRAY_SIZE(set_engines__extensions),
					   &set);
	if (err) {
		free_engines(set.engines);
		return err;
	}

replace:
	mutex_lock(&ctx->engines_mutex);
	if (args->size)
		i915_gem_context_set_user_engines(ctx);
	else
		i915_gem_context_clear_user_engines(ctx);
	rcu_swap_protected(ctx->engines, set.engines, 1);
	mutex_unlock(&ctx->engines_mutex);

	call_rcu(&set.engines->rcu, free_engines_rcu);

	return 0;
}
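
/*
 * Illustrative userspace sketch (not compiled as part of this file; assumes
 * libdrm): replace a context's legacy engine map with an explicit two-slot
 * map (rcs0 in slot 0, vcs0 in slot 1). Execbuf then addresses these slots by
 * index.
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(map, 2) = {
 *		.engines = {
 *			{ .engine_class = I915_ENGINE_CLASS_RENDER,
 *			  .engine_instance = 0 },
 *			{ .engine_class = I915_ENGINE_CLASS_VIDEO,
 *			  .engine_instance = 0 },
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(map),
 *		.value = (uintptr_t)&map,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */
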
static struct i915_gem_engines *
__copy_engines(struct i915_gem_engines *e)
{
	struct i915_gem_engines *copy;
	unsigned int n;

	copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
	if (!copy)
		return ERR_PTR(-ENOMEM);

	init_rcu_head(&copy->rcu);
	for (n = 0; n < e->num_engines; n++) {
		if (e->engines[n])
			copy->engines[n] = intel_context_get(e->engines[n]);
		else
			copy->engines[n] = NULL;
	}
	copy->num_engines = n;

	return copy;
}

static int
get_engines(struct i915_gem_context *ctx,
	    struct drm_i915_gem_context_param *args)
{
	struct i915_context_param_engines __user *user;
	struct i915_gem_engines *e;
	size_t n, count, size;
	int err = 0;

	err = mutex_lock_interruptible(&ctx->engines_mutex);
	if (err)
		return err;

	e = NULL;
	if (i915_gem_context_user_engines(ctx))
		e = __copy_engines(i915_gem_context_engines(ctx));
	mutex_unlock(&ctx->engines_mutex);
	if (IS_ERR_OR_NULL(e)) {
		args->size = 0;
		return PTR_ERR_OR_ZERO(e);
	}

	count = e->num_engines;

	/* Be paranoid in case we have an impedance mismatch */
	if (!check_struct_size(user, engines, count, &size)) {
		err = -EINVAL;
		goto err_free;
	}
	if (overflows_type(size, args->size)) {
		err = -EINVAL;
		goto err_free;
	}

	if (!args->size) {
		args->size = size;
		goto err_free;
	}

	if (args->size < size) {
		err = -EINVAL;
		goto err_free;
	}

	user = u64_to_user_ptr(args->value);
	if (!access_ok(user, size)) {
		err = -EFAULT;
		goto err_free;
	}

	if (put_user(0, &user->extensions)) {
		err = -EFAULT;
		goto err_free;
	}

	for (n = 0; n < count; n++) {
		struct i915_engine_class_instance ci = {
			.engine_class = I915_ENGINE_CLASS_INVALID,
			.engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
		};

		if (e->engines[n]) {
			ci.engine_class = e->engines[n]->engine->uabi_class;
			ci.engine_instance = e->engines[n]->engine->uabi_instance;
		}

		if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
			err = -EFAULT;
			goto err_free;
		}
	}

	args->size = size;

err_free:
	free_engines(e);
	return err;
}

static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		else
			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_recoverable(ctx);
		else
			i915_gem_context_clear_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
	{
		s64 priority = args->value;

		if (args->size)
			ret = -EINVAL;
		else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
			ret = -ENODEV;
		else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
			 priority < I915_CONTEXT_MIN_USER_PRIORITY)
			ret = -EINVAL;
		else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
			 !capable(CAP_SYS_NICE))
			ret = -EPERM;
		else
			ctx->sched.priority =
				I915_USER_PRIORITY(priority);
	}
	break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_ppgtt(fpriv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

struct create_ext {
	struct i915_gem_context *ctx;
	struct drm_i915_file_private *fpriv;
};

static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	if (local.param.ctx_id)
		return -EINVAL;

	return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}

static int clone_engines(struct i915_gem_context *dst,
			 struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	bool user_engines;
	unsigned long n;

	clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
	if (!clone)
		goto err_unlock;

	init_rcu_head(&clone->rcu);
	for (n = 0; n < e->num_engines; n++) {
		struct intel_engine_cs *engine;

		if (!e->engines[n]) {
			clone->engines[n] = NULL;
			continue;
		}
		engine = e->engines[n]->engine;

		/*
		 * Virtual engines are singletons; they can only exist
		 * inside a single context, because they embed their
		 * HW context... As each virtual context implies a single
		 * timeline (each engine can only dequeue a single request
		 * at any time), it would be surprising for two contexts
		 * to use the same engine. So let's create a copy of
		 * the virtual engine instead.
		 */
		if (intel_engine_is_virtual(engine))
			clone->engines[n] =
				intel_execlists_clone_virtual(dst, engine);
		else
			clone->engines[n] = intel_context_create(dst, engine);
		if (IS_ERR_OR_NULL(clone->engines[n])) {
			__free_engines(clone, n);
			goto err_unlock;
		}
	}
	clone->num_engines = n;

	user_engines = i915_gem_context_user_engines(src);
	i915_gem_context_unlock_engines(src);

	free_engines(dst->engines);
	RCU_INIT_POINTER(dst->engines, clone);
	if (user_engines)
		i915_gem_context_set_user_engines(dst);
	else
		i915_gem_context_clear_user_engines(dst);
	return 0;

err_unlock:
	i915_gem_context_unlock_engines(src);
	return -ENOMEM;
}

static int clone_flags(struct i915_gem_context *dst,
		       struct i915_gem_context *src)
{
	dst->user_flags = src->user_flags;
	return 0;
}

static int clone_schedattr(struct i915_gem_context *dst,
			   struct i915_gem_context *src)
{
	dst->sched = src->sched;
	return 0;
}

static int clone_sseu(struct i915_gem_context *dst,
		      struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	unsigned long n;
	int err;

	clone = dst->engines; /* no locking required; sole access */
	if (e->num_engines != clone->num_engines) {
		err = -EINVAL;
		goto unlock;
	}

	for (n = 0; n < e->num_engines; n++) {
		struct intel_context *ce = e->engines[n];

		if (clone->engines[n]->engine->class != ce->engine->class) {
			/* Must have compatible engine maps! */
			err = -EINVAL;
			goto unlock;
		}

		/* serialises with set_sseu */
		err = intel_context_lock_pinned(ce);
		if (err)
			goto unlock;

		clone->engines[n]->sseu = ce->sseu;
		intel_context_unlock_pinned(ce);
	}

	err = 0;
unlock:
	i915_gem_context_unlock_engines(src);
	return err;
}

static int clone_timeline(struct i915_gem_context *dst,
			  struct i915_gem_context *src)
{
	if (src->timeline)
		__assign_timeline(dst, src->timeline);

	return 0;
}

static int clone_vm(struct i915_gem_context *dst,
		    struct i915_gem_context *src)
{
	struct i915_address_space *vm;
	int err = 0;

	rcu_read_lock();
	do {
		vm = rcu_dereference(src->vm);
		if (!vm)
			break;

		if (!kref_get_unless_zero(&vm->ref))
			continue;

		/*
		 * This ppgtt may have been reallocated between
		 * the read and the kref, and reassigned to a third
		 * context. In order to avoid inadvertent sharing
		 * of this ppgtt with that third context (and not
		 * src), we have to confirm that we have the same
		 * ppgtt after passing through the strong memory
		 * barrier implied by a successful
		 * kref_get_unless_zero().
		 *
		 * Once we have acquired the current ppgtt of src,
		 * we no longer care if it is released from src, as
		 * it cannot be reallocated elsewhere.
		 */

		if (vm == rcu_access_pointer(src->vm))
			break;

		i915_vm_put(vm);
	} while (1);
	rcu_read_unlock();

	if (vm) {
		if (!mutex_lock_interruptible(&dst->mutex)) {
			__assign_ppgtt(dst, vm);
			mutex_unlock(&dst->mutex);
		} else {
			err = -EINTR;
		}
		i915_vm_put(vm);
	}

	return err;
}
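
/*
 * The lookup-then-recheck loop in clone_vm() is a general pattern for taking
 * a reference on an RCU-protected pointer that may be swapped and freed
 * concurrently. A minimal sketch of the same idea, with hypothetical names:
 *
 *	rcu_read_lock();
 *	do {
 *		obj = rcu_dereference(slot);
 *		if (!obj)
 *			break;
 *		if (!kref_get_unless_zero(&obj->ref))
 *			continue;	// being freed; reload and retry
 *		if (obj == rcu_access_pointer(slot))
 *			break;		// still the published object, keep ref
 *		kref_put(&obj->ref, obj_release);	// raced; drop, retry
 *	} while (1);
 *	rcu_read_unlock();
 */
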
static int create_clone(struct i915_user_extension __user *ext, void *data)
{
	static int (* const fn[])(struct i915_gem_context *dst,
				  struct i915_gem_context *src) = {
#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
		MAP(ENGINES, clone_engines),
		MAP(FLAGS, clone_flags),
		MAP(SCHEDATTR, clone_schedattr),
		MAP(SSEU, clone_sseu),
		MAP(TIMELINE, clone_timeline),
		MAP(VM, clone_vm),
#undef MAP
	};
	struct drm_i915_gem_context_create_ext_clone local;
	const struct create_ext *arg = data;
	struct i915_gem_context *dst = arg->ctx;
	struct i915_gem_context *src;
	int err, bit;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
		     I915_CONTEXT_CLONE_UNKNOWN);

	if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
		return -EINVAL;

	if (local.rsvd)
		return -EINVAL;

	rcu_read_lock();
	src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
	rcu_read_unlock();
	if (!src)
		return -ENOENT;

	GEM_BUG_ON(src == dst);

	for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
		if (!(local.flags & BIT(bit)))
			continue;

		err = fn[bit](dst, src);
		if (err)
			return err;
	}

	return 0;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm,
			  pid_nr(get_task_pid(current, PIDTYPE_PID)));
		return -EIO;
	}

	ext_data.ctx = i915_gem_create_context(i915, args->flags);
	if (IS_ERR(ext_data.ctx))
		return PTR_ERR(ext_data.ctx);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = ret;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;

err_ctx:
	context_close(ext_data.ctx);
	return ret;
}
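
/*
 * Illustrative userspace sketch (not compiled as part of this file; assumes
 * libdrm): create a context and lower its priority in one ioctl by chaining a
 * SETPARAM extension through I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS.
 *
 *	struct drm_i915_gem_context_create_ext_setparam p_prio = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PRIORITY,
 *			.value = -512,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p_prio,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *	// create.ctx_id now holds the new context handle
 */
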

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	if (mutex_lock_interruptible(&file_priv->context_idr_lock))
		return -EINTR;

	ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
	mutex_unlock(&file_priv->context_idr_lock);
	if (!ctx)
		return -ENOENT;

	context_close(ctx);
	return 0;
}

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->size = 0;
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		rcu_read_lock();
		if (rcu_access_pointer(ctx->vm))
			args->value = rcu_dereference(ctx->vm)->total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		rcu_read_unlock();
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = get_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = ctx_setparam(file_priv, ctx, args);

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

static void i915_global_gem_context_shrink(void)
{
	kmem_cache_shrink(global.slab_luts);
}

static void i915_global_gem_context_exit(void)
{
	kmem_cache_destroy(global.slab_luts);
}

static struct i915_global_gem_context global = { {
	.shrink = i915_global_gem_context_shrink,
	.exit = i915_global_gem_context_exit,
} };

int __init i915_global_gem_context_init(void)
{
	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!global.slab_luts)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}