// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2021 Intel Corporation
 */

#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (GRAPHICS_VER(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (GRAPHICS_VER(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}

static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}

static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (GRAPHICS_VER(engine->i915) == 7) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			fallthrough;
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (GRAPHICS_VER(engine->i915) == 6) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write_fw(engine->uncore, hwsp, offset);
	intel_uncore_posting_read_fw(engine->uncore, hwsp);
}

static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	ENGINE_WRITE_FW(engine, RING_INSTPM,
			_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					   INSTPM_SYNC_FLUSH));
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_INSTPM(engine->mmio_base),
					 INSTPM_SYNC_FLUSH, 0,
					 2000, 0, NULL))
		ENGINE_TRACE(engine,
			     "wait for SyncFlush to complete for TLB invalidation timed out\n");
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static u32 pp_dir(struct i915_address_space *vm)
{
	return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
}

static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (!vm)
		return;

	ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
	ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));

	if (GRAPHICS_VER(engine->i915) >= 7) {
		ENGINE_WRITE_FW(engine,
				RING_MODE_GEN7,
				_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	/* Empty the ring by skipping to the end */
	ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE_FW(engine, RING_CTL, 0);
	ENGINE_POSTING_READ(engine, RING_CTL);

	/* Then reset the disabled ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, 0);
	ENGINE_WRITE_FW(engine, RING_TAIL, 0);

	return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
}

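/*
 * Bring the ring back to life: (re)program the status page and the
 * RING_START/HEAD/TAIL/CTL registers from scratch, wait for the ring to
 * report itself as valid, and only then release STOP_RING so that any
 * requests still left in the ring can resume execution.
 */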
static int xcs_resume(struct intel_engine_cs *engine)
{
	struct intel_ring *ring = engine->legacy.ring;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	/*
	 * Double check the ring is empty & disabled before we resume. Called
	 * from atomic context during PCI probe, so _hardirq().
	 */
	intel_synchronize_hardirq(engine->i915);
	if (!stop_ring(engine))
		goto err;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_breadcrumbs_reset(engine->breadcrumbs);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
	ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE_FW(engine, RING_CTL,
			RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the ring never reports itself as VALID, it is dead */
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_CTL(engine->mmio_base),
					 RING_VALID, RING_VALID,
					 5000, 0, NULL))
		goto err;

	if (GRAPHICS_VER(engine->i915) > 2)
		ENGINE_WRITE_FW(engine,
				RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
	return 0;

err:
	drm_err(&engine->i915->drm,
		"%s initialization failed; "
		"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
		engine->name,
		ENGINE_READ(engine, RING_CTL),
		ENGINE_READ(engine, RING_CTL) & RING_VALID,
		ENGINE_READ(engine, RING_HEAD), ring->head,
		ENGINE_READ(engine, RING_TAIL), ring->tail,
		ENGINE_READ(engine, RING_START),
		i915_ggtt_offset(ring->vma));
	return -EIO;
}

static void sanitize_hwsp(struct intel_engine_cs *engine)
{
	struct intel_timeline *tl;

	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
		intel_timeline_reset_seqno(tl);
}

static void xcs_sanitize(struct intel_engine_cs *engine)
{
	/*
	 * Poison residual state on resume, in case the suspend didn't!
	 *
	 * We have to assume that across suspend/resume (or other loss
	 * of control) the contents of our pinned buffers have been
	 * lost, replaced by garbage. Since this doesn't always happen,
	 * let's poison such state so that we more quickly spot when
	 * we falsely assume it has been preserved.
	 */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);

	/*
	 * The kernel_context HWSP is stored in the status_page. As above,
	 * that may be lost on resume/initialisation, and so we need to
	 * reset the value in the HWSP.
	 */
	sanitize_hwsp(engine);

	/* And scrub the dirty cachelines for the HWSP */
	drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);

	intel_engine_reset_pinned_contexts(engine);
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
	 * a system hang if a batchbuffer is progressing when
	 * the reset is issued, regardless of READY_TO_RESET ack.
	 * Thus assume it is best to stop engines on all gens
	 * where we have a gpu reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 * WaClearRingBufHeadRegAtInit:ctg,elk
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");
	intel_engine_stop_cs(engine);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		ENGINE_TRACE(engine,
			     "HEAD not reset to zero, "
			     "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
			     ENGINE_READ_FW(engine, RING_CTL),
			     ENGINE_READ_FW(engine, RING_HEAD),
			     ENGINE_READ_FW(engine, RING_TAIL),
			     ENGINE_READ_FW(engine, RING_START));
		if (!stop_ring(engine)) {
			drm_err(&engine->i915->drm,
				"failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ_FW(engine, RING_CTL),
				ENGINE_READ_FW(engine, RING_HEAD),
				ENGINE_READ_FW(engine, RING_TAIL),
				ENGINE_READ_FW(engine, RING_START));
		}
	}
}

static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	rcu_read_lock();
	list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
		if (!__i915_request_is_complete(pos)) {
			rq = pos;
			break;
		}
	}
	rcu_read_unlock();

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away random
	 * amounts of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when they lose the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
		i915_request_put(i915_request_mark_eio(request));
	intel_engine_signal_breadcrumbs(engine);

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static int ring_context_init_default_state(struct intel_context *ce,
					   struct i915_gem_ww_ctx *ww)
{
	struct drm_i915_gem_object *obj = ce->state->obj;
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	shmem_read(ce->engine->default_state, 0,
		   vaddr, ce->engine->context_size);

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	return 0;
}

static int ring_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww,
				void **unused)
{
	struct i915_address_space *vm;
	int err = 0;

	if (ce->engine->default_state &&
	    !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
		err = ring_context_init_default_state(ce, ww);
		if (err)
			return err;
	}

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
}

static void ring_context_post_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
	}

	return 0;
}

static int ring_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}

static void ring_context_ban(struct intel_context *ce,
			     struct i915_request *rq)
{
	struct intel_engine_cs *engine;

	if (!rq || !i915_request_is_active(rq))
		return;

	engine = rq->engine;
	lockdep_assert_held(&engine->sched_engine->lock);
	list_for_each_entry_continue(rq, &engine->sched_engine->requests,
				     sched.link)
		if (rq->context == ce) {
			i915_request_set_error_once(rq, -EIO);
			__i915_request_skip(rq);
		}
}

static void ring_context_cancel_request(struct intel_context *ce,
					struct i915_request *rq)
{
	struct intel_engine_cs *engine = NULL;

	i915_request_active_engine(rq, &engine);

	if (engine && intel_engine_pulse(engine))
		intel_gt_handle_error(engine->gt, engine->mask, 0,
				      "request cancellation by %s",
				      current->comm);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.cancel_request = ring_context_cancel_request,

	.ban = ring_context_ban,

	.pre_pin = ring_context_pre_pin,
	.pin = ring_context_pin,
	.unpin = ring_context_unpin,
	.post_unpin = ring_context_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};

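/*
 * Point the ring at a new page directory: load PP_DIR_DCLV and PP_DIR_BASE
 * via MI_LOAD_REGISTER_IMM, read PP_DIR_BASE back into scratch to stall
 * until the load has taken effect, then request a TLB invalidation.
 */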
static int load_pd_dir(struct i915_request *rq,
		       struct i915_address_space *vm,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = pp_dir(vm);

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}

static int mi_set_context(struct i915_request *rq,
			  struct intel_context *ce,
			  u32 flags)
{
	struct intel_engine_cs *engine = rq->engine;
	struct drm_i915_private *i915 = engine->i915;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (GRAPHICS_VER(i915) == 7)
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (GRAPHICS_VER(i915) == 5)
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (GRAPHICS_VER(i915) == 7) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (GRAPHICS_VER(i915) == 5) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (GRAPHICS_VER(i915) == 7) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (GRAPHICS_VER(i915) == 5) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3_slice(struct i915_request *rq, int slice)
{
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
	u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
	for (i = 0; i < L3LOG_DW; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
#undef L3LOG_DW
}

static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
	 */
	ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    engine->wa_ctx.vma->node.start, 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce &&
		    i915_mitigate_clear_residuals()) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}

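/*
 * Per-request preamble for the legacy ring: reserve space for the
 * postamble, invalidate GPU caches and TLBs, and emit any mm/context
 * switch needed before the payload is added to the shared ring.
 */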
static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}

static void irq_handler(struct intel_engine_cs *engine, u16 iir)
{
	intel_engine_signal_breadcrumbs(engine);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	intel_engine_set_irq_handler(engine, irq_handler);

	if (GRAPHICS_VER(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 3) {
		engine->irq_enable = gen3_irq_enable;
		engine->irq_disable = gen3_irq_disable;
	} else {
		engine->irq_enable = gen2_irq_enable;
		engine->irq_disable = gen2_irq_disable;
	}
}

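/*
 * The legacy ring scheduler keeps all submitted requests on a single
 * per-engine list, protected by sched_engine->lock.
 */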
static void add_to_engine(struct i915_request *rq)
{
	lockdep_assert_held(&rq->engine->sched_engine->lock);
	list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}

static void remove_from_engine(struct i915_request *rq)
{
	spin_lock_irq(&rq->engine->sched_engine->lock);
	list_del_init(&rq->sched.link);

	/* Prevent further __await_execution() registering a cb, then flush */
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	spin_unlock_irq(&rq->engine->sched_engine->lock);

	i915_request_notify_execute_cb_imm(rq);
}

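/*
 * Common setup shared by every legacy (ring buffer) engine; the per-class
 * setup_rcs/vcs/bcs/vecs() helpers below override individual vfuncs.
 */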
static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->sanitize = xcs_sanitize;

	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->add_active_request = add_to_engine;
	engine->remove_active_request = remove_from_engine;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
	if (GRAPHICS_VER(i915) == 5)
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (GRAPHICS_VER(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (GRAPHICS_VER(i915) >= 4)
		engine->emit_bb_start = gen4_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = gen3_emit_bb_start;
}

static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) >= 7) {
		engine->emit_flush = gen7_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 6) {
		engine->emit_flush = gen6_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 5) {
		engine->emit_flush = gen4_emit_flush_rcs;
	} else {
		if (GRAPHICS_VER(i915) < 4)
			engine->emit_flush = gen2_emit_flush;
		else
			engine->emit_flush = gen4_emit_flush_rcs;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (GRAPHICS_VER(i915) == 6)
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_emit_flush_vcs;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (GRAPHICS_VER(i915) == 6)
			engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
		else
			engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
	} else {
		engine->emit_flush = gen4_emit_flush_vcs;
		if (GRAPHICS_VER(i915) == 5)
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}

static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) == 6)
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
	else
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}

static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
				   struct i915_gem_ww_ctx *ww,
				   struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size, err;

	if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
		return NULL;

	err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (err < 0)
		return ERR_PTR(err);
	if (!err)
		return NULL;

	size = ALIGN(err, PAGE_SIZE);

	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return ERR_CAST(vma);
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		vma->private = NULL;
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}

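/*
 * Final construction of a ring submission engine: apply the common and
 * per-class vfuncs, create the shared timeline and ring, and (for gen7
 * render) install the residuals clearing workaround batch, taking all
 * the object locks under a single ww acquire context.
 */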
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct i915_gem_ww_ctx ww;
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	struct i915_vma *gen7_wa_vma;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create_from_engine(engine,
						     I915_GEM_HWS_SEQNO_ADDR);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline;
	}

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	gen7_wa_vma = gen7_ctx_vma(engine);
	if (IS_ERR(gen7_wa_vma)) {
		err = PTR_ERR(gen7_wa_vma);
		goto err_ring;
	}

	i915_gem_ww_ctx_init(&ww, false);

retry:
	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
	if (!err && gen7_wa_vma)
		err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
	if (!err && engine->legacy.ring->vma->obj)
		err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
	if (!err)
		err = intel_timeline_pin(timeline, &ww);
	if (!err) {
		err = intel_ring_pin(ring, &ww);
		if (err)
			intel_timeline_unpin(timeline);
	}
	if (err)
		goto out;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (gen7_wa_vma) {
		err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
		if (err) {
			intel_ring_unpin(ring);
			intel_timeline_unpin(timeline);
		}
	}

out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (err)
		goto err_gen7_put;

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_gen7_put:
	if (gen7_wa_vma) {
		intel_context_put(gen7_wa_vma->private);
		i915_gem_object_put(gen7_wa_vma->obj);
	}
err_ring:
	intel_ring_put(ring);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif