// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2021 Intel Corporation
 */

#include "gem/i915_gem_internal.h"

#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_regs.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (GRAPHICS_VER(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (GRAPHICS_VER(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}

static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}

static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (GRAPHICS_VER(engine->i915) == 7) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			fallthrough;
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (GRAPHICS_VER(engine->i915) == 6) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write_fw(engine->uncore, hwsp, offset);
	intel_uncore_posting_read_fw(engine->uncore, hwsp);
}

static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	ENGINE_WRITE_FW(engine, RING_INSTPM,
			_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					   INSTPM_SYNC_FLUSH));
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_INSTPM(engine->mmio_base),
					 INSTPM_SYNC_FLUSH, 0,
					 2000, 0, NULL))
		ENGINE_TRACE(engine,
			     "wait for SyncFlush to complete for TLB invalidation timed out\n");
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static u32 pp_dir(struct i915_address_space *vm)
{
	return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
}

static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (!vm)
		return;

	ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
	ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));

	if (GRAPHICS_VER(engine->i915) >= 7) {
		ENGINE_WRITE_FW(engine,
				RING_MODE_GEN7,
				_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	/* Empty the ring by skipping to the end */
	ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE_FW(engine, RING_CTL, 0);
	ENGINE_POSTING_READ(engine, RING_CTL);

	/* Then reset the disabled ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, 0);
	ENGINE_WRITE_FW(engine, RING_TAIL, 0);

	return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
}

static int xcs_resume(struct intel_engine_cs *engine)
{
	struct intel_ring *ring = engine->legacy.ring;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	/*
	 * Double check the ring is empty & disabled before we resume. Called
	 * from atomic context during PCI probe, so _hardirq().
	 */
	intel_synchronize_hardirq(engine->i915);
	if (!stop_ring(engine))
		goto err;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_breadcrumbs_reset(engine->breadcrumbs);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
	ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE_FW(engine, RING_CTL,
			RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the ring never reports as valid, it is dead */
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_CTL(engine->mmio_base),
					 RING_VALID, RING_VALID,
					 5000, 0, NULL))
		goto err;

	if (GRAPHICS_VER(engine->i915) > 2)
		ENGINE_WRITE_FW(engine,
				RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
	return 0;

err:
	drm_err(&engine->i915->drm,
		"%s initialization failed; "
		"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
		engine->name,
		ENGINE_READ(engine, RING_CTL),
		ENGINE_READ(engine, RING_CTL) & RING_VALID,
		ENGINE_READ(engine, RING_HEAD), ring->head,
		ENGINE_READ(engine, RING_TAIL), ring->tail,
		ENGINE_READ(engine, RING_START),
		i915_ggtt_offset(ring->vma));
	return -EIO;
}

static void sanitize_hwsp(struct intel_engine_cs *engine)
{
	struct intel_timeline *tl;

	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
		intel_timeline_reset_seqno(tl);
}

static void xcs_sanitize(struct intel_engine_cs *engine)
{
	/*
	 * Poison residual state on resume, in case the suspend didn't!
	 *
	 * We have to assume that across suspend/resume (or other loss
	 * of control) the contents of our pinned buffers have been
	 * lost, replaced by garbage. Since this doesn't always happen,
	 * let's poison such state so that we more quickly spot when
	 * we falsely assume it has been preserved.
	 */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);

	/*
	 * The kernel_context HWSP is stored in the status_page. As above,
	 * that may be lost on resume/initialisation, and so we need to
	 * reset the value in the HWSP.
	 */
	sanitize_hwsp(engine);

	/* And scrub the dirty cachelines for the HWSP */
	drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);

	intel_engine_reset_pinned_contexts(engine);
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
	 * from a system hang if a batchbuffer is progressing when
	 * the reset is issued, regardless of READY_TO_RESET ack.
	 * Thus assume it is best to stop engines on all gens
	 * where we have a gpu reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 * WaClearRingBufHeadRegAtInit:ctg,elk
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");
	intel_engine_stop_cs(engine);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		ENGINE_TRACE(engine,
			     "HEAD not reset to zero, "
			     "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
			     ENGINE_READ_FW(engine, RING_CTL),
			     ENGINE_READ_FW(engine, RING_HEAD),
			     ENGINE_READ_FW(engine, RING_TAIL),
			     ENGINE_READ_FW(engine, RING_START));
		if (!stop_ring(engine)) {
			drm_err(&engine->i915->drm,
				"failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ_FW(engine, RING_CTL),
				ENGINE_READ_FW(engine, RING_HEAD),
				ENGINE_READ_FW(engine, RING_TAIL),
				ENGINE_READ_FW(engine, RING_START));
		}
	}
}

static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	rcu_read_lock();
	list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
		if (!__i915_request_is_complete(pos)) {
			rq = pos;
			break;
		}
	}
	rcu_read_unlock();

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non-default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a random
	 * number of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when they lose the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
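		 *
		 * Note: __i915_request_reset() below uses 'stalled' as the
		 * guilty flag: a guilty request has its batch skipped and is
		 * marked with an error, while an innocent request is left
		 * intact to be replayed.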
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
		i915_request_put(i915_request_mark_eio(request));
	intel_engine_signal_breadcrumbs(engine);

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static int ring_context_init_default_state(struct intel_context *ce,
					   struct i915_gem_ww_ctx *ww)
{
	struct drm_i915_gem_object *obj = ce->state->obj;
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	shmem_read(ce->engine->default_state, 0,
		   vaddr, ce->engine->context_size);

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	return 0;
}

static int ring_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww,
				void **unused)
{
	struct i915_address_space *vm;
	int err = 0;

	if (ce->engine->default_state &&
	    !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
		err = ring_context_init_default_state(ce, ww);
		if (err)
			return err;
	}

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
}

static void ring_context_post_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
	}

	return 0;
}

static int ring_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}

static void ring_context_ban(struct intel_context *ce,
			     struct i915_request *rq)
{
	struct intel_engine_cs *engine;

	if (!rq || !i915_request_is_active(rq))
		return;

	engine = rq->engine;
	lockdep_assert_held(&engine->sched_engine->lock);
	list_for_each_entry_continue(rq, &engine->sched_engine->requests,
				     sched.link)
		if (rq->context == ce) {
			i915_request_set_error_once(rq, -EIO);
			__i915_request_skip(rq);
		}
}

static void ring_context_cancel_request(struct intel_context *ce,
					struct i915_request *rq)
{
	struct intel_engine_cs *engine = NULL;

	i915_request_active_engine(rq, &engine);

	if (engine && intel_engine_pulse(engine))
		intel_gt_handle_error(engine->gt, engine->mask, 0,
				      "request cancellation by %s",
				      current->comm);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.cancel_request = ring_context_cancel_request,

	.ban = ring_context_ban,

	.pre_pin = ring_context_pre_pin,
	.pin = ring_context_pin,
	.unpin = ring_context_unpin,
	.post_unpin = ring_context_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};

static int load_pd_dir(struct i915_request *rq,
		       struct i915_address_space *vm,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = pp_dir(vm);

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}

static int mi_set_context(struct i915_request *rq,
			  struct intel_context *ce,
			  u32 flags)
{
	struct intel_engine_cs *engine = rq->engine;
	struct drm_i915_private *i915 = engine->i915;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (GRAPHICS_VER(i915) == 7)
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (GRAPHICS_VER(i915) == 5)
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (GRAPHICS_VER(i915) == 7) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (GRAPHICS_VER(i915) == 5) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
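		 *
		 * (The switch back to the target context happens via the
		 * unconditional MI_SET_CONTEXT to ce->state emitted just
		 * below.)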
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (GRAPHICS_VER(i915) == 7) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (GRAPHICS_VER(i915) == 5) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3_slice(struct i915_request *rq, int slice)
{
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
	u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
	for (i = 0; i < L3LOG_DW; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
#undef L3LOG_DW
}

static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
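	 *
	 * (Hence the ordering here: the flush above, the PD load via
	 * load_pd_dir(), and then a final invalidate below.)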
	 */
	ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    engine->wa_ctx.vma->node.start, 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce &&
		    i915_mitigate_clear_residuals()) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}

static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/*
	 * Unconditionally invalidate GPU caches and TLBs.
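	 * Note: this is emitted ahead of switch_context() below, so the
	 * mm/context switch itself runs against freshly invalidated TLBs.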
	 */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
			      _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 RING_PSMI_CTL(GEN6_BSD_RING_BASE),
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
			      _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}

static void irq_handler(struct intel_engine_cs *engine, u16 iir)
{
	intel_engine_signal_breadcrumbs(engine);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	intel_engine_set_irq_handler(engine, irq_handler);

	if (GRAPHICS_VER(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 3) {
		engine->irq_enable = gen3_irq_enable;
		engine->irq_disable = gen3_irq_disable;
	} else {
		engine->irq_enable = gen2_irq_enable;
		engine->irq_disable = gen2_irq_disable;
	}
}

static void add_to_engine(struct i915_request *rq)
{
	lockdep_assert_held(&rq->engine->sched_engine->lock);
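	/* Active requests are tracked on a single list, in submission order */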
	list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}

static void remove_from_engine(struct i915_request *rq)
{
	spin_lock_irq(&rq->engine->sched_engine->lock);
	list_del_init(&rq->sched.link);

	/* Prevent further __await_execution() registering a cb, then flush */
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	spin_unlock_irq(&rq->engine->sched_engine->lock);

	i915_request_notify_execute_cb_imm(rq);
}

static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->sanitize = xcs_sanitize;

	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->add_active_request = add_to_engine;
	engine->remove_active_request = remove_from_engine;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
	if (GRAPHICS_VER(i915) == 5)
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (GRAPHICS_VER(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (GRAPHICS_VER(i915) >= 4)
		engine->emit_bb_start = gen4_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = gen3_emit_bb_start;
}

static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) >= 7) {
		engine->emit_flush = gen7_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 6) {
		engine->emit_flush = gen6_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 5) {
		engine->emit_flush = gen4_emit_flush_rcs;
	} else {
		if (GRAPHICS_VER(i915) < 4)
			engine->emit_flush = gen2_emit_flush;
		else
			engine->emit_flush = gen4_emit_flush_rcs;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (GRAPHICS_VER(i915) == 6)
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_emit_flush_vcs;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (GRAPHICS_VER(i915) == 6)
			engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
		else
			engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
	} else {
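		/* pre-gen6 BSD */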
		engine->emit_flush = gen4_emit_flush_vcs;
		if (GRAPHICS_VER(i915) == 5)
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}

static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) == 6)
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
	else
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}

static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
				   struct i915_gem_ww_ctx *ww,
				   struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size, err;

	if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
		return NULL;

	err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (err < 0)
		return ERR_PTR(err);
	if (!err)
		return NULL;

	size = ALIGN(err, PAGE_SIZE);

	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return ERR_CAST(vma);
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		vma->private = NULL;
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}

int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct i915_gem_ww_ctx ww;
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	struct i915_vma *gen7_wa_vma;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create_from_engine(engine,
						     I915_GEM_HWS_SEQNO_ADDR);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline;
	}

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	gen7_wa_vma = gen7_ctx_vma(engine);
	if (IS_ERR(gen7_wa_vma)) {
		err = PTR_ERR(gen7_wa_vma);
		goto err_ring;
	}

	i915_gem_ww_ctx_init(&ww, false);

retry:
	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
	if (!err && gen7_wa_vma)
		err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
	if (!err)
		err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
	if (!err)
		err = intel_timeline_pin(timeline, &ww);
	if (!err) {
		err = intel_ring_pin(ring, &ww);
		if (err)
			intel_timeline_unpin(timeline);
	}
	if (err)
		goto out;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (gen7_wa_vma) {
		err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
		if (err) {
			intel_ring_unpin(ring);
			intel_timeline_unpin(timeline);
		}
	}

out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (err)
		goto err_gen7_put;

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_gen7_put:
	if (gen7_wa_vma) {
		intel_context_put(gen7_wa_vma->private);
		i915_gem_object_put(gen7_wa_vma->obj);
	}
err_ring:
	intel_ring_put(ring);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif