/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_reset.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/lib_sw_fence.h"

#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"

static int live_sanitycheck(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	struct igt_spinner spin;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (igt_spinner_init(&spin, i915))
		goto err_unlock;

	ctx = kernel_context(i915);
	if (!ctx)
		goto err_spin;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin, rq)) {
			GEM_TRACE("spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx;
		}

		igt_spinner_end(&spin);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx;
		}
	}

	err = 0;
err_ctx:
	kernel_context_close(ctx);
err_spin:
	igt_spinner_fini(&spin);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_busywait_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	u32 *map;

	/*
	 * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
	 * preempt the busywaits used to synchronise between rings.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_unlock;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_ctx_lo;
	}

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto err_obj;
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_map;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_map;

	for_each_engine(engine, i915, id) {
		struct i915_request *lo, *hi;
		struct igt_live_test t;
		u32 *cs;

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_vma;
		}

		/*
		 * We create two requests. The low priority request
		 * busywaits on a semaphore (inside the ringbuffer where
		 * it should be preemptible) and the high priority request
		 * uses a MI_STORE_DWORD_IMM to update the semaphore value
		 * allowing the first request to complete. If preemption
		 * fails, we hang instead.
		 */
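
		/*
		 * The low priority ring first stores 1 to the scratch dword
		 * (so the CPU can observe that it has started executing) and
		 * then polls with MI_SEMAPHORE_WAIT until the dword reads 0
		 * again. The high priority request simply stores 0, releasing
		 * the busywait.
		 */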
		lo = igt_request_alloc(ctx_lo, engine);
		if (IS_ERR(lo)) {
			err = PTR_ERR(lo);
			goto err_vma;
		}

		cs = intel_ring_begin(lo, 8);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(lo);
			goto err_vma;
		}

		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;
		*cs++ = 1;

		/* XXX Do we need a flush + invalidate here? */

		*cs++ = MI_SEMAPHORE_WAIT |
			MI_SEMAPHORE_GLOBAL_GTT |
			MI_SEMAPHORE_POLL |
			MI_SEMAPHORE_SAD_EQ_SDD;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;

		intel_ring_advance(lo, cs);
		i915_request_add(lo);

		if (wait_for(READ_ONCE(*map), 10)) {
			err = -ETIMEDOUT;
			goto err_vma;
		}

		/* Low priority request should be busywaiting now */
		if (i915_request_wait(lo, 0, 1) != -ETIME) {
			pr_err("%s: Busywaiting request did not busywait!\n",
			       engine->name);
			err = -EIO;
			goto err_vma;
		}

		hi = igt_request_alloc(ctx_hi, engine);
		if (IS_ERR(hi)) {
			err = PTR_ERR(hi);
			goto err_vma;
		}

		cs = intel_ring_begin(hi, 4);
		if (IS_ERR(cs)) {
			err = PTR_ERR(cs);
			i915_request_add(hi);
			goto err_vma;
		}

		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = i915_ggtt_offset(vma);
		*cs++ = 0;
		*cs++ = 0;

		intel_ring_advance(hi, cs);
		i915_request_add(hi);

		if (i915_request_wait(lo, 0, HZ / 5) < 0) {
			struct drm_printer p = drm_info_printer(i915->drm.dev);

			pr_err("%s: Failed to preempt semaphore busywait!\n",
			       engine->name);

			intel_engine_dump(engine, &p, "%s\n", engine->name);
			GEM_TRACE_DUMP();

			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_vma;
		}
		GEM_BUG_ON(READ_ONCE(*map));

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_vma;
		}
	}

	err = 0;
err_vma:
	i915_vma_unpin(vma);
err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
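
/*
 * live_preempt: start a spinner from a minimum priority context, wait for
 * it to begin executing, then submit a spinner from a maximum priority
 * context on the same engine. The second spinner can only start if the
 * scheduler preempts the first, so waiting for it to spin verifies basic
 * priority-based preemption.
 */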
static int live_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
		pr_err("Logical preemption supported, but not exposed\n");

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct igt_live_test t;
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_late_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {};
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;

	for_each_engine(engine, i915, id) {
		struct igt_live_test t;
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			pr_err("First context failed to start\n");
			goto err_wedged;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_NOOP);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("Second context overtook first?\n");
			goto err_wedged;
		}

		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
		engine->schedule(rq, &attr);
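
		/*
		 * Now that the hi request has been bumped to maximum
		 * priority, the scheduler should preempt the still-spinning
		 * lo context and start executing the hi spinner.
		 */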
		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			pr_err("High priority context failed to preempt the low priority context\n");
			GEM_TRACE_DUMP();
			goto err_wedged;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&spin_hi);
	igt_spinner_end(&spin_lo);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_ctx_lo;
}
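
/*
 * A preempt_client pairs a context with its own spinner so that the
 * following tests can juggle several independent spinning clients on the
 * same engine.
 */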
struct preempt_client {
	struct igt_spinner spin;
	struct i915_gem_context *ctx;
};

static int preempt_client_init(struct drm_i915_private *i915,
			       struct preempt_client *c)
{
	c->ctx = kernel_context(i915);
	if (!c->ctx)
		return -ENOMEM;

	if (igt_spinner_init(&c->spin, i915))
		goto err_ctx;

	return 0;

err_ctx:
	kernel_context_close(c->ctx);
	return -ENOMEM;
}

static void preempt_client_fini(struct preempt_client *c)
{
	igt_spinner_fini(&c->spin);
	kernel_context_close(c->ctx);
}

static int live_suppress_self_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_sched_attr attr = {
		.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
	};
	struct preempt_client a, b;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Verify that if a preemption request does not cause a change in
	 * the current execution order, the preempt-to-idle injection is
	 * skipped and that we do not accidentally apply it after the CS
	 * completion event.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (USES_GUC_SUBMISSION(i915))
		return 0; /* presume black box */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (preempt_client_init(i915, &a))
		goto err_unlock;
	if (preempt_client_init(i915, &b))
		goto err_client_a;

	for_each_engine(engine, i915, id) {
		struct i915_request *rq_a, *rq_b;
		int depth;

		if (!intel_engine_has_preemption(engine))
			continue;

		engine->execlists.preempt_hang.count = 0;

		rq_a = igt_spinner_create_request(&a.spin,
						  a.ctx, engine,
						  MI_NOOP);
		if (IS_ERR(rq_a)) {
			err = PTR_ERR(rq_a);
			goto err_client_b;
		}

		i915_request_add(rq_a);
		if (!igt_wait_for_spinner(&a.spin, rq_a)) {
			pr_err("First client failed to start\n");
			goto err_wedged;
		}

		for (depth = 0; depth < 8; depth++) {
			rq_b = igt_spinner_create_request(&b.spin,
							  b.ctx, engine,
							  MI_NOOP);
			if (IS_ERR(rq_b)) {
				err = PTR_ERR(rq_b);
				goto err_client_b;
			}
			i915_request_add(rq_b);

			GEM_BUG_ON(i915_request_completed(rq_a));
			engine->schedule(rq_a, &attr);
			igt_spinner_end(&a.spin);

			if (!igt_wait_for_spinner(&b.spin, rq_b)) {
				pr_err("Second client failed to start\n");
				goto err_wedged;
			}

			swap(a, b);
			rq_a = rq_b;
		}
		igt_spinner_end(&a.spin);

		if (engine->execlists.preempt_hang.count) {
			pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
			       engine->execlists.preempt_hang.count,
			       depth);
			err = -EINVAL;
			goto err_client_b;
		}

		if (igt_flush_test(i915, I915_WAIT_LOCKED))
			goto err_wedged;
	}

	err = 0;
err_client_b:
	preempt_client_fini(&b);
err_client_a:
	preempt_client_fini(&a);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&b.spin);
	igt_spinner_end(&a.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_b;
}

static int __i915_sw_fence_call
dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}
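
/*
 * dummy_request() hand-rolls a permanently incomplete request. The wait
 * suppression test installs it as each timeline's last_request so that the
 * real spinner requests appear to follow an older, still busy request and
 * therefore do not receive the NEWCLIENT priority boost.
 */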
static struct i915_request *dummy_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	rq = kzalloc(sizeof(*rq), GFP_KERNEL);
	if (!rq)
		return NULL;

	INIT_LIST_HEAD(&rq->active_list);
	rq->engine = engine;

	i915_sched_node_init(&rq->sched);

	/* mark this request as permanently incomplete */
	rq->fence.seqno = 1;
	BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
	GEM_BUG_ON(i915_request_completed(rq));

	i915_sw_fence_init(&rq->submit, dummy_notify);
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	return rq;
}

static void dummy_request_free(struct i915_request *dummy)
{
	/* We have to fake the CS interrupt to kick the next request */
	i915_sw_fence_commit(&dummy->submit);

	i915_request_mark_complete(dummy);
	dma_fence_signal(&dummy->fence);

	i915_sched_node_fini(&dummy->sched);
	i915_sw_fence_fini(&dummy->submit);

	dma_fence_free(&dummy->fence);
}

static int live_suppress_wait_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct preempt_client client[4];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;
	int i;

	/*
	 * Waiters are given a little priority nudge, but not enough
	 * to actually cause any preemption. Double check that we do
	 * not needlessly generate preempt-to-idle cycles.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
		goto err_unlock;
	if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
		goto err_client_0;
	if (preempt_client_init(i915, &client[2])) /* head of queue */
		goto err_client_1;
	if (preempt_client_init(i915, &client[3])) /* bystander */
		goto err_client_2;

	for_each_engine(engine, i915, id) {
		int depth;

		if (!intel_engine_has_preemption(engine))
			continue;

		if (!engine->emit_init_breadcrumb)
			continue;

		for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
			struct i915_request *rq[ARRAY_SIZE(client)];
			struct i915_request *dummy;

			engine->execlists.preempt_hang.count = 0;

			dummy = dummy_request(engine);
			if (!dummy)
				goto err_client_3;

			for (i = 0; i < ARRAY_SIZE(client); i++) {
				rq[i] = igt_spinner_create_request(&client[i].spin,
								   client[i].ctx, engine,
								   MI_NOOP);
				if (IS_ERR(rq[i])) {
					err = PTR_ERR(rq[i]);
					goto err_wedged;
				}

				/* Disable NEWCLIENT promotion */
				__i915_active_request_set(&rq[i]->timeline->last_request,
							  dummy);
				i915_request_add(rq[i]);
			}

			dummy_request_free(dummy);

			GEM_BUG_ON(i915_request_completed(rq[0]));
			if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
				pr_err("%s: First client failed to start\n",
				       engine->name);
				goto err_wedged;
			}
			GEM_BUG_ON(!i915_request_started(rq[0]));

			if (i915_request_wait(rq[depth],
					      I915_WAIT_PRIORITY,
					      1) != -ETIME) {
				pr_err("%s: Waiter depth:%d completed!\n",
				       engine->name, depth);
				goto err_wedged;
			}

			for (i = 0; i < ARRAY_SIZE(client); i++)
				igt_spinner_end(&client[i].spin);

			if (igt_flush_test(i915, I915_WAIT_LOCKED))
				goto err_wedged;

			if (engine->execlists.preempt_hang.count) {
				pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
				       engine->name,
				       engine->execlists.preempt_hang.count,
				       depth);
				err = -EINVAL;
				goto err_client_3;
			}
		}
	}

	err = 0;
err_client_3:
	preempt_client_fini(&client[3]);
err_client_2:
	preempt_client_fini(&client[2]);
err_client_1:
	preempt_client_fini(&client[1]);
err_client_0:
	preempt_client_fini(&client[0]);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	for (i = 0; i < ARRAY_SIZE(client); i++)
		igt_spinner_end(&client[i].spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_3;
}

static int live_chain_preempt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct preempt_client hi, lo;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	/*
	 * Build a chain AB...BA between two contexts (A, B) and request
	 * preemption of the last request. It should then complete before
	 * the previously submitted spinner in B.
	 */

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (preempt_client_init(i915, &hi))
		goto err_unlock;

	if (preempt_client_init(i915, &lo))
		goto err_client_hi;

	for_each_engine(engine, i915, id) {
		struct i915_sched_attr attr = {
			.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
		};
		struct igt_live_test t;
		struct i915_request *rq;
		int ring_size, count, i;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&lo.spin,
						lo.ctx, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq))
			goto err_wedged;
		i915_request_add(rq);

		ring_size = rq->wa_tail - rq->head;
		if (ring_size < 0)
			ring_size += rq->ring->size;
		ring_size = rq->ring->size / ring_size;
		pr_debug("%s(%s): Using maximum of %d requests\n",
			 __func__, engine->name, ring_size);

		igt_spinner_end(&lo.spin);
		if (i915_request_wait(rq, 0, HZ / 2) < 0) {
			pr_err("Timed out waiting to flush %s\n", engine->name);
			goto err_wedged;
		}

		if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
			err = -EIO;
			goto err_wedged;
		}

		for_each_prime_number_from(count, 1, ring_size) {
			rq = igt_spinner_create_request(&hi.spin,
							hi.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			if (!igt_wait_for_spinner(&hi.spin, rq))
				goto err_wedged;

			rq = igt_spinner_create_request(&lo.spin,
							lo.ctx, engine,
							MI_ARB_CHECK);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);

			for (i = 0; i < count; i++) {
				rq = igt_request_alloc(lo.ctx, engine);
				if (IS_ERR(rq))
					goto err_wedged;
				i915_request_add(rq);
			}

			rq = igt_request_alloc(hi.ctx, engine);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			engine->schedule(rq, &attr);

			igt_spinner_end(&hi.spin);
			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("Failed to preempt over chain of %d\n",
				       count);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);
				goto err_wedged;
			}
			igt_spinner_end(&lo.spin);

			rq = igt_request_alloc(lo.ctx, engine);
			if (IS_ERR(rq))
				goto err_wedged;
			i915_request_add(rq);
			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				struct drm_printer p =
					drm_info_printer(i915->drm.dev);

				pr_err("Failed to flush low priority chain of %d requests\n",
				       count);
				intel_engine_dump(engine, &p,
						  "%s\n", engine->name);
				goto err_wedged;
			}
		}

		if (igt_live_test_end(&t)) {
			err = -EIO;
			goto err_wedged;
		}
	}

	err = 0;
err_client_lo:
	preempt_client_fini(&lo);
err_client_hi:
	preempt_client_fini(&hi);
err_unlock:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;

err_wedged:
	igt_spinner_end(&hi.spin);
	igt_spinner_end(&lo.spin);
	i915_gem_set_wedged(i915);
	err = -EIO;
	goto err_client_lo;
}
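
/*
 * live_preempt_hang: ask the execlists backend to inject a hang on its next
 * preemption attempt (preempt_hang.inject_hang), wait for the injection to
 * fire, reset the engine, and then check that the high priority spinner
 * still gets to run afterwards.
 */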
static int live_preempt_hang(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_hi, *ctx_lo;
	struct igt_spinner spin_hi, spin_lo;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	int err = -ENOMEM;

	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
		return 0;

	if (!intel_has_reset_engine(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	if (igt_spinner_init(&spin_hi, i915))
		goto err_unlock;

	if (igt_spinner_init(&spin_lo, i915))
		goto err_spin_hi;

	ctx_hi = kernel_context(i915);
	if (!ctx_hi)
		goto err_spin_lo;
	ctx_hi->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

	ctx_lo = kernel_context(i915);
	if (!ctx_lo)
		goto err_ctx_hi;
	ctx_lo->sched.priority =
		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		if (!intel_engine_has_preemption(engine))
			continue;

		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		i915_request_add(rq);
		if (!igt_wait_for_spinner(&spin_lo, rq)) {
			GEM_TRACE("lo spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
						MI_ARB_CHECK);
		if (IS_ERR(rq)) {
			igt_spinner_end(&spin_lo);
			err = PTR_ERR(rq);
			goto err_ctx_lo;
		}

		init_completion(&engine->execlists.preempt_hang.completion);
		engine->execlists.preempt_hang.inject_hang = true;

		i915_request_add(rq);

		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
						 HZ / 10)) {
			pr_err("Preemption did not occur within timeout!\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
		i915_reset_engine(engine, NULL);
		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

		engine->execlists.preempt_hang.inject_hang = false;

		if (!igt_wait_for_spinner(&spin_hi, rq)) {
			GEM_TRACE("hi spinner failed to start\n");
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto err_ctx_lo;
		}

		igt_spinner_end(&spin_hi);
		igt_spinner_end(&spin_lo);
		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
			err = -EIO;
			goto err_ctx_lo;
		}
	}

	err = 0;
err_ctx_lo:
	kernel_context_close(ctx_lo);
err_ctx_hi:
	kernel_context_close(ctx_hi);
err_spin_lo:
	igt_spinner_fini(&spin_lo);
err_spin_hi:
	igt_spinner_fini(&spin_hi);
err_unlock:
	igt_flush_test(i915, I915_WAIT_LOCKED);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int random_range(struct rnd_state *rnd, int min, int max)
{
	return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}
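
/*
 * Shared state for the preemption smoke tests: a pool of contexts, an
 * optional batch of MI_ARB_CHECKs to run, the engine currently being
 * targeted, a PRNG for picking contexts and priorities, and a per-thread
 * request counter.
 */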
struct preempt_smoke {
	struct drm_i915_private *i915;
	struct i915_gem_context **contexts;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *batch;
	unsigned int ncontext;
	struct rnd_state prng;
	unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
							  &smoke->prng)];
}

static int smoke_submit(struct preempt_smoke *smoke,
			struct i915_gem_context *ctx, int prio,
			struct drm_i915_gem_object *batch)
{
	struct i915_request *rq;
	struct i915_vma *vma = NULL;
	int err = 0;

	if (batch) {
		vma = i915_vma_instance(batch, ctx->vm, NULL);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;
	}

	ctx->sched.priority = prio;

	rq = igt_request_alloc(ctx, smoke->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin;
	}

	if (vma) {
		i915_vma_lock(vma);
		err = rq->engine->emit_bb_start(rq,
						vma->node.start,
						PAGE_SIZE, 0);
		if (!err)
			err = i915_vma_move_to_active(vma, rq, 0);
		i915_vma_unlock(vma);
	}

	i915_request_add(rq);

unpin:
	if (vma)
		i915_vma_unpin(vma);

	return err;
}

static int smoke_crescendo_thread(void *arg)
{
	struct preempt_smoke *smoke = arg;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_gem_context *ctx = smoke_context(smoke);
		int err;

		mutex_lock(&smoke->i915->drm.struct_mutex);
		err = smoke_submit(smoke,
				   ctx, count % I915_PRIORITY_MAX,
				   smoke->batch);
		mutex_unlock(&smoke->i915->drm.struct_mutex);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	smoke->count = count;
	return 0;
}

static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
	struct task_struct *tsk[I915_NUM_ENGINES] = {};
	struct preempt_smoke arg[I915_NUM_ENGINES];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned long count;
	int err = 0;

	mutex_unlock(&smoke->i915->drm.struct_mutex);

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;
		arg[id].engine = engine;
		if (!(flags & BATCH))
			arg[id].batch = NULL;
		arg[id].count = 0;

		/* Pass each thread its own per-engine copy of the state */
		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		if (IS_ERR(tsk[id])) {
			err = PTR_ERR(tsk[id]);
			break;
		}
		get_task_struct(tsk[id]);
	}

	count = 0;
	for_each_engine(engine, smoke->i915, id) {
		int status;

		if (IS_ERR_OR_NULL(tsk[id]))
			continue;

		status = kthread_stop(tsk[id]);
		if (status && !err)
			err = status;

		count += arg[id].count;

		put_task_struct(tsk[id]);
	}

	mutex_lock(&smoke->i915->drm.struct_mutex);

	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
	return err;
}
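
/*
 * Unlike smoke_crescendo(), which runs one thread per engine submitting
 * with steadily increasing priority, smoke_random() submits from a single
 * thread, walking all engines with randomly chosen priorities until the
 * timeout expires.
 */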
static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
	enum intel_engine_id id;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		for_each_engine(smoke->engine, smoke->i915, id) {
			struct i915_gem_context *ctx = smoke_context(smoke);
			int err;

			err = smoke_submit(smoke,
					   ctx, random_priority(&smoke->prng),
					   flags & BATCH ? smoke->batch : NULL);
			if (err)
				return err;

			count++;
		}
	} while (!__igt_timeout(end_time, NULL));

	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
		count, flags,
		RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
	return 0;
}

static int live_preempt_smoke(void *arg)
{
	struct preempt_smoke smoke = {
		.i915 = arg,
		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
		.ncontext = 1024,
	};
	const unsigned int phase[] = { 0, BATCH };
	intel_wakeref_t wakeref;
	struct igt_live_test t;
	int err = -ENOMEM;
	u32 *cs;
	int n;

	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
		return 0;

	smoke.contexts = kmalloc_array(smoke.ncontext,
				       sizeof(*smoke.contexts),
				       GFP_KERNEL);
	if (!smoke.contexts)
		return -ENOMEM;

	mutex_lock(&smoke.i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm);

	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
	if (IS_ERR(smoke.batch)) {
		err = PTR_ERR(smoke.batch);
		goto err_unlock;
	}

	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_batch;
	}
	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
		cs[n] = MI_ARB_CHECK;
	cs[n] = MI_BATCH_BUFFER_END;
	i915_gem_object_flush_map(smoke.batch);
	i915_gem_object_unpin_map(smoke.batch);

	if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
		err = -EIO;
		goto err_batch;
	}

	for (n = 0; n < smoke.ncontext; n++) {
		smoke.contexts[n] = kernel_context(smoke.i915);
		if (!smoke.contexts[n])
			goto err_ctx;
	}

	for (n = 0; n < ARRAY_SIZE(phase); n++) {
		err = smoke_crescendo(&smoke, phase[n]);
		if (err)
			goto err_ctx;

		err = smoke_random(&smoke, phase[n]);
		if (err)
			goto err_ctx;
	}

err_ctx:
	if (igt_live_test_end(&t))
		err = -EIO;

	for (n = 0; n < smoke.ncontext; n++) {
		if (!smoke.contexts[n])
			break;
		kernel_context_close(smoke.contexts[n]);
	}

err_batch:
	i915_gem_object_put(smoke.batch);
err_unlock:
	intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref);
	mutex_unlock(&smoke.i915->drm.struct_mutex);
	kfree(smoke.contexts);

	return err;
}

static int nop_virtual_engine(struct drm_i915_private *i915,
			      struct intel_engine_cs **siblings,
			      unsigned int nsibling,
			      unsigned int nctx,
			      unsigned int flags)
#define CHAIN BIT(0)
{
	IGT_TIMEOUT(end_time);
	struct i915_request *request[16];
	struct i915_gem_context *ctx[16];
	struct intel_context *ve[16];
	unsigned long n, prime, nc;
	struct igt_live_test t;
	ktime_t times[2] = {};
	int err;

	GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));

	for (n = 0; n < nctx; n++) {
		ctx[n] = kernel_context(i915);
		if (!ctx[n]) {
			err = -ENOMEM;
			nctx = n;
			goto out;
		}

		ve[n] = intel_execlists_create_virtual(ctx[n],
						       siblings, nsibling);
		if (IS_ERR(ve[n])) {
			kernel_context_close(ctx[n]);
			err = PTR_ERR(ve[n]);
			nctx = n;
			goto out;
		}

		err = intel_context_pin(ve[n]);
		if (err) {
			intel_context_put(ve[n]);
			kernel_context_close(ctx[n]);
			nctx = n;
			goto out;
		}
	}
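
	/*
	 * For each prime batch size, submit that many empty requests on
	 * every virtual engine (back to back per context if CHAIN is set,
	 * otherwise interleaved across the contexts), wait for them all to
	 * complete, and then compare the single-request latency against the
	 * amortised per-request latency of the largest batch.
	 */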
	err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name);
	if (err)
		goto out;

	for_each_prime_number_from(prime, 1, 8192) {
		times[1] = ktime_get_raw();

		if (flags & CHAIN) {
			for (nc = 0; nc < nctx; nc++) {
				for (n = 0; n < prime; n++) {
					request[nc] =
						i915_request_create(ve[nc]);
					if (IS_ERR(request[nc])) {
						err = PTR_ERR(request[nc]);
						goto out;
					}

					i915_request_add(request[nc]);
				}
			}
		} else {
			for (n = 0; n < prime; n++) {
				for (nc = 0; nc < nctx; nc++) {
					request[nc] =
						i915_request_create(ve[nc]);
					if (IS_ERR(request[nc])) {
						err = PTR_ERR(request[nc]);
						goto out;
					}

					i915_request_add(request[nc]);
				}
			}
		}

		for (nc = 0; nc < nctx; nc++) {
			if (i915_request_wait(request[nc], 0, HZ / 10) < 0) {
				pr_err("%s(%s): wait for %llx:%lld timed out\n",
				       __func__, ve[0]->engine->name,
				       request[nc]->fence.context,
				       request[nc]->fence.seqno);

				GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
					  __func__, ve[0]->engine->name,
					  request[nc]->fence.context,
					  request[nc]->fence.seqno);
				GEM_TRACE_DUMP();
				i915_gem_set_wedged(i915);
				break;
			}
		}

		times[1] = ktime_sub(ktime_get_raw(), times[1]);
		if (prime == 1)
			times[0] = times[1];

		if (__igt_timeout(end_time, NULL))
			break;
	}

	err = igt_live_test_end(&t);
	if (err)
		goto out;

	pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
		nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
		prime, div64_u64(ktime_to_ns(times[1]), prime));

out:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	for (nc = 0; nc < nctx; nc++) {
		intel_context_unpin(ve[nc]);
		intel_context_put(ve[nc]);
		kernel_context_close(ctx[nc]);
	}
	return err;
}

static int live_virtual_engine(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int class, inst;
	int err = -ENODEV;

	if (USES_GUC_SUBMISSION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	for_each_engine(engine, i915, id) {
		err = nop_virtual_engine(i915, &engine, 1, 1, 0);
		if (err) {
			pr_err("Failed to wrap engine %s: err=%d\n",
			       engine->name, err);
			goto out_unlock;
		}
	}

	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
		int nsibling, n;

		nsibling = 0;
		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
			if (!i915->engine_class[class][inst])
				continue;

			siblings[nsibling++] = i915->engine_class[class][inst];
		}
		if (nsibling < 2)
			continue;

		for (n = 1; n <= nsibling + 1; n++) {
			err = nop_virtual_engine(i915, siblings, nsibling,
						 n, 0);
			if (err)
				goto out_unlock;
		}

		err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN);
		if (err)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int mask_virtual_engine(struct drm_i915_private *i915,
			       struct intel_engine_cs **siblings,
			       unsigned int nsibling)
{
	struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
	struct i915_gem_context *ctx;
	struct intel_context *ve;
	struct igt_live_test t;
	unsigned int n;
	int err;

	/*
	 * Check that by setting the execution mask on a request, we can
	 * restrict it to our desired engine within the virtual engine.
	 */

	ctx = kernel_context(i915);
	if (!ctx)
		return -ENOMEM;

	ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
	if (IS_ERR(ve)) {
		err = PTR_ERR(ve);
		goto out_close;
	}

	err = intel_context_pin(ve);
	if (err)
		goto out_put;

	err = igt_live_test_begin(&t, i915, __func__, ve->engine->name);
	if (err)
		goto out_unpin;

	for (n = 0; n < nsibling; n++) {
		request[n] = i915_request_create(ve);
		if (IS_ERR(request[n])) {
			err = PTR_ERR(request[n]);
			nsibling = n;
			goto out;
		}

		/* Reverse order as it's more likely to be unnatural */
		request[n]->execution_mask = siblings[nsibling - n - 1]->mask;

		i915_request_get(request[n]);
		i915_request_add(request[n]);
	}

	for (n = 0; n < nsibling; n++) {
		if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
			pr_err("%s(%s): wait for %llx:%lld timed out\n",
			       __func__, ve->engine->name,
			       request[n]->fence.context,
			       request[n]->fence.seqno);

			GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
				  __func__, ve->engine->name,
				  request[n]->fence.context,
				  request[n]->fence.seqno);
			GEM_TRACE_DUMP();
			i915_gem_set_wedged(i915);
			err = -EIO;
			goto out;
		}

		if (request[n]->engine != siblings[nsibling - n - 1]) {
			pr_err("Executed on wrong sibling '%s', expected '%s'\n",
			       request[n]->engine->name,
			       siblings[nsibling - n - 1]->name);
			err = -EINVAL;
			goto out;
		}
	}

	err = igt_live_test_end(&t);
	if (err)
		goto out;

out:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	for (n = 0; n < nsibling; n++)
		i915_request_put(request[n]);

out_unpin:
	intel_context_unpin(ve);
out_put:
	intel_context_put(ve);
out_close:
	kernel_context_close(ctx);
	return err;
}

static int live_virtual_mask(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
	unsigned int class, inst;
	int err = 0;

	if (USES_GUC_SUBMISSION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
		unsigned int nsibling;

		nsibling = 0;
		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
			if (!i915->engine_class[class][inst])
				break;

			siblings[nsibling++] = i915->engine_class[class][inst];
		}
		if (nsibling < 2)
			continue;

		err = mask_virtual_engine(i915, siblings, nsibling);
		if (err)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int bond_virtual_engine(struct drm_i915_private *i915,
			       unsigned int class,
			       struct intel_engine_cs **siblings,
			       unsigned int nsibling,
			       unsigned int flags)
#define BOND_SCHEDULE BIT(0)
{
	struct intel_engine_cs *master;
	struct i915_gem_context *ctx;
	struct i915_request *rq[16];
	enum intel_engine_id id;
	unsigned long n;
	int err;

	GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);

	ctx = kernel_context(i915);
	if (!ctx)
		return -ENOMEM;
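
	/*
	 * For each engine outside the target class, submit a "master"
	 * request (optionally held back by an onstack fence so that it must
	 * be scheduled rather than executed immediately), then create one
	 * bonded virtual engine per sibling and submit a request on each
	 * that awaits the master's execution. Once everything has run,
	 * verify that every bonded request executed on its designated
	 * sibling.
	 */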
	err = 0;
	rq[0] = ERR_PTR(-ENOMEM);
	for_each_engine(master, i915, id) {
		struct i915_sw_fence fence = {};

		if (master->class == class)
			continue;

		memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));

		rq[0] = igt_request_alloc(ctx, master);
		if (IS_ERR(rq[0])) {
			err = PTR_ERR(rq[0]);
			goto out;
		}
		i915_request_get(rq[0]);

		if (flags & BOND_SCHEDULE) {
			onstack_fence_init(&fence);
			err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
							       &fence,
							       GFP_KERNEL);
		}
		i915_request_add(rq[0]);
		if (err < 0)
			goto out;

		for (n = 0; n < nsibling; n++) {
			struct intel_context *ve;

			ve = intel_execlists_create_virtual(ctx,
							    siblings,
							    nsibling);
			if (IS_ERR(ve)) {
				err = PTR_ERR(ve);
				onstack_fence_fini(&fence);
				goto out;
			}

			err = intel_virtual_engine_attach_bond(ve->engine,
							       master,
							       siblings[n]);
			if (err) {
				intel_context_put(ve);
				onstack_fence_fini(&fence);
				goto out;
			}

			err = intel_context_pin(ve);
			intel_context_put(ve);
			if (err) {
				onstack_fence_fini(&fence);
				goto out;
			}

			rq[n + 1] = i915_request_create(ve);
			intel_context_unpin(ve);
			if (IS_ERR(rq[n + 1])) {
				err = PTR_ERR(rq[n + 1]);
				onstack_fence_fini(&fence);
				goto out;
			}
			i915_request_get(rq[n + 1]);

			err = i915_request_await_execution(rq[n + 1],
							   &rq[0]->fence,
							   ve->engine->bond_execute);
			i915_request_add(rq[n + 1]);
			if (err < 0) {
				onstack_fence_fini(&fence);
				goto out;
			}
		}
		onstack_fence_fini(&fence);

		if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
			pr_err("Master request did not execute (on %s)!\n",
			       rq[0]->engine->name);
			err = -EIO;
			goto out;
		}

		for (n = 0; n < nsibling; n++) {
			if (i915_request_wait(rq[n + 1], 0,
					      MAX_SCHEDULE_TIMEOUT) < 0) {
				err = -EIO;
				goto out;
			}

			if (rq[n + 1]->engine != siblings[n]) {
				pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
				       siblings[n]->name,
				       rq[n + 1]->engine->name,
				       rq[0]->engine->name);
				err = -EINVAL;
				goto out;
			}
		}

		for (n = 0; !IS_ERR(rq[n]); n++)
			i915_request_put(rq[n]);
		rq[0] = ERR_PTR(-ENOMEM);
	}

out:
	for (n = 0; !IS_ERR(rq[n]); n++)
		i915_request_put(rq[n]);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	kernel_context_close(ctx);
	return err;
}

static int live_virtual_bond(void *arg)
{
	static const struct phase {
		const char *name;
		unsigned int flags;
	} phases[] = {
		{ "", 0 },
		{ "schedule", BOND_SCHEDULE },
		{ },
	};
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
	unsigned int class, inst;
	int err = 0;

	if (USES_GUC_SUBMISSION(i915))
		return 0;

	mutex_lock(&i915->drm.struct_mutex);

	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
		const struct phase *p;
		int nsibling;

		nsibling = 0;
		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
			if (!i915->engine_class[class][inst])
				break;

			GEM_BUG_ON(nsibling == ARRAY_SIZE(siblings));
			siblings[nsibling++] = i915->engine_class[class][inst];
		}
		if (nsibling < 2)
			continue;

		for (p = phases; p->name; p++) {
			err = bond_virtual_engine(i915,
						  class, siblings, nsibling,
						  p->flags);
			if (err) {
				pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
				       __func__, p->name, class, nsibling, err);
				goto out_unlock;
			}
		}
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_sanitycheck),
		SUBTEST(live_busywait_preempt),
		SUBTEST(live_preempt),
		SUBTEST(live_late_preempt),
		SUBTEST(live_suppress_self_preempt),
		SUBTEST(live_suppress_wait_preempt),
		SUBTEST(live_chain_preempt),
		SUBTEST(live_preempt_hang),
		SUBTEST(live_preempt_smoke),
		SUBTEST(live_virtual_engine),
		SUBTEST(live_virtual_mask),
		SUBTEST(live_virtual_bond),
	};

	if (!HAS_EXECLISTS(i915))
		return 0;

	if (i915_terminally_wedged(i915))
		return 0;

	return i915_subtests(tests, i915);
}