// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2021 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"

#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_regs.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (GRAPHICS_VER(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (GRAPHICS_VER(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}

static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}
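
/*
 * There are two ways of pointing the hardware at the status page: platforms
 * where HWS_NEEDS_PHYSICAL() is true take a physical address via HWS_PGA
 * above, while the rest are given a GGTT offset through the per-engine
 * register programmed by set_hwsp() below.
 */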
static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (GRAPHICS_VER(engine->i915) == 7) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			fallthrough;
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (GRAPHICS_VER(engine->i915) == 6) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write_fw(engine->uncore, hwsp, offset);
	intel_uncore_posting_read_fw(engine->uncore, hwsp);
}

static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	if ((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0)
		drm_warn(&engine->i915->drm, "%s not idle before sync flush!\n",
			 engine->name);

	ENGINE_WRITE_FW(engine, RING_INSTPM,
			_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					   INSTPM_SYNC_FLUSH));
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_INSTPM(engine->mmio_base),
					 INSTPM_SYNC_FLUSH, 0,
					 2000, 0, NULL))
		ENGINE_TRACE(engine,
			     "wait for SyncFlush to complete for TLB invalidation timed out\n");
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static u32 pp_dir(struct i915_address_space *vm)
{
	return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
}

static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (!vm)
		return;

	ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
	ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));

	if (GRAPHICS_VER(engine->i915) >= 7) {
		ENGINE_WRITE_FW(engine,
				RING_MODE_GEN7,
				_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	/* Empty the ring by skipping to the end */
	ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE_FW(engine, RING_CTL, 0);
	ENGINE_POSTING_READ(engine, RING_CTL);

	/* Then reset the disabled ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, 0);
	ENGINE_WRITE_FW(engine, RING_TAIL, 0);

	return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
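
/*
 * Resume sequence: drain and disable the ring, re-point the status page,
 * then program RING_START/HEAD/TAIL before re-enabling the ring by setting
 * RING_VALID in RING_CTL.
 */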
static int xcs_resume(struct intel_engine_cs *engine)
{
	struct intel_ring *ring = engine->legacy.ring;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	/*
	 * Double check the ring is empty & disabled before we resume. Called
	 * from atomic context during PCI probe, so _hardirq().
	 */
	intel_synchronize_hardirq(engine->i915);
	if (!stop_ring(engine))
		goto err;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_breadcrumbs_reset(engine->breadcrumbs);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
	ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE_FW(engine, RING_CTL,
			RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_CTL(engine->mmio_base),
					 RING_VALID, RING_VALID,
					 5000, 0, NULL))
		goto err;

	if (GRAPHICS_VER(engine->i915) > 2)
		ENGINE_WRITE_FW(engine,
				RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
	return 0;

err:
	drm_err(&engine->i915->drm,
		"%s initialization failed; "
		"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
		engine->name,
		ENGINE_READ(engine, RING_CTL),
		ENGINE_READ(engine, RING_CTL) & RING_VALID,
		ENGINE_READ(engine, RING_HEAD), ring->head,
		ENGINE_READ(engine, RING_TAIL), ring->tail,
		ENGINE_READ(engine, RING_START),
		i915_ggtt_offset(ring->vma));
	return -EIO;
}

static void sanitize_hwsp(struct intel_engine_cs *engine)
{
	struct intel_timeline *tl;

	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
		intel_timeline_reset_seqno(tl);
}

static void xcs_sanitize(struct intel_engine_cs *engine)
{
	/*
	 * Poison residual state on resume, in case the suspend didn't!
	 *
	 * We have to assume that across suspend/resume (or other loss
	 * of control) the contents of our pinned buffers have been
	 * lost, replaced by garbage. Since this doesn't always happen,
	 * let's poison such state so that we more quickly spot when
	 * we falsely assume it has been preserved.
	 */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);

	/*
	 * The kernel_context HWSP is stored in the status_page. As above,
	 * that may be lost on resume/initialisation, and so we need to
	 * reset the value in the HWSP.
	 */
	sanitize_hwsp(engine);

	/* And scrub the dirty cachelines for the HWSP */
	drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);

	intel_engine_reset_pinned_contexts(engine);
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also, a modern gpu such as kbl can suffer
	 * a system hang if a batchbuffer is progressing when the reset
	 * is issued, regardless of the READY_TO_RESET ack.
	 * Thus assume it is best to stop engines on all gens
	 * where we have a gpu reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 * WaClearRingBufHeadRegAtInit:ctg,elk
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");
	intel_engine_stop_cs(engine);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		ENGINE_TRACE(engine,
			     "HEAD not reset to zero, "
			     "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
			     ENGINE_READ_FW(engine, RING_CTL),
			     ENGINE_READ_FW(engine, RING_HEAD),
			     ENGINE_READ_FW(engine, RING_TAIL),
			     ENGINE_READ_FW(engine, RING_START));
		if (!stop_ring(engine)) {
			drm_err(&engine->i915->drm,
				"failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ_FW(engine, RING_CTL),
				ENGINE_READ_FW(engine, RING_HEAD),
				ENGINE_READ_FW(engine, RING_TAIL),
				ENGINE_READ_FW(engine, RING_START));
		}
	}
}
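
/*
 * After a reset, rewind the ring to the first incomplete request (or, if
 * everything has completed, to the tail) so that execution restarts from a
 * known-good point.
 */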
static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	rcu_read_lock();
	list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
		if (!__i915_request_is_complete(pos)) {
			rq = pos;
			break;
		}
	}
	rcu_read_unlock();

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when it loses the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
		i915_request_put(i915_request_mark_eio(request));
	intel_engine_signal_breadcrumbs(engine);

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static int ring_context_init_default_state(struct intel_context *ce,
					   struct i915_gem_ww_ctx *ww)
{
	struct drm_i915_gem_object *obj = ce->state->obj;
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	shmem_read(ce->engine->default_state, 0,
		   vaddr, ce->engine->context_size);

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	return 0;
}

static int ring_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww,
				void **unused)
{
	struct i915_address_space *vm;
	int err = 0;

	if (ce->engine->default_state &&
	    !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
		err = ring_context_init_default_state(ce, ww);
		if (err)
			return err;
	}

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
}

static void ring_context_post_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
	}

	return 0;
}

static int ring_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}

static void ring_context_ban(struct intel_context *ce,
			     struct i915_request *rq)
{
	struct intel_engine_cs *engine;

	if (!rq || !i915_request_is_active(rq))
		return;

	engine = rq->engine;
	lockdep_assert_held(&engine->sched_engine->lock);
	list_for_each_entry_continue(rq, &engine->sched_engine->requests,
				     sched.link)
		if (rq->context == ce) {
			i915_request_set_error_once(rq, -EIO);
			__i915_request_skip(rq);
		}
}

static void ring_context_cancel_request(struct intel_context *ce,
					struct i915_request *rq)
{
	struct intel_engine_cs *engine = NULL;

	i915_request_active_engine(rq, &engine);

	if (engine && intel_engine_pulse(engine))
		intel_gt_handle_error(engine->gt, engine->mask, 0,
				      "request cancellation by %s",
				      current->comm);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.cancel_request = ring_context_cancel_request,

	.ban = ring_context_ban,

	.pre_pin = ring_context_pre_pin,
	.pin = ring_context_pin,
	.unpin = ring_context_unpin,
	.post_unpin = ring_context_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};
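
/*
 * Emit LRIs to point this ring's PP_DIR_BASE/DCLV at the ppgtt page
 * directory and then force a TLB invalidation via INSTPM; the SRM to
 * scratch in between is presumably there to stall until the page-table
 * load has landed.
 */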
static int load_pd_dir(struct i915_request *rq,
		       struct i915_address_space *vm,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = pp_dir(vm);

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
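
/*
 * Emit the legacy MI_SET_CONTEXT switch. On gen7 the switch is bracketed by
 * disabling arbitration and PSMI sleep messaging on the other engines
 * (WaProgramMiArbOnOffAroundMiSetContext), and MI_SET_CONTEXT itself must be
 * followed by an MI_NOOP (WaMiSetContext_Hang).
 */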
static int mi_set_context(struct i915_request *rq,
			  struct intel_context *ce,
			  u32 flags)
{
	struct intel_engine_cs *engine = rq->engine;
	struct drm_i915_private *i915 = engine->i915;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (GRAPHICS_VER(i915) == 7)
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (GRAPHICS_VER(i915) == 5)
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (GRAPHICS_VER(i915) == 7) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (GRAPHICS_VER(i915) == 5) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (GRAPHICS_VER(i915) == 7) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = INVALID_MMIO_REG; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (GRAPHICS_VER(i915) == 5) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3_slice(struct i915_request *rq, int slice)
{
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
	u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
	for (i = 0; i < L3LOG_DW; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
#undef L3LOG_DW
}

static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
	 */
	ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    engine->wa_ctx.vma->node.start, 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce &&
		    i915_mitigate_clear_residuals()) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}
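
/*
 * Request construction for the legacy ring: temporarily grow the reservation
 * by LEGACY_REQUEST_SIZE while emitting the preamble (cache/TLB invalidation
 * plus the context/mm switch) so that we are unlikely to have to wait for
 * ring space once we start building the request proper.
 */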
static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/*
	 * Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
			      _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 RING_PSMI_CTL(GEN6_BSD_RING_BASE),
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/*
	 * Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
			      _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}

static void irq_handler(struct intel_engine_cs *engine, u16 iir)
{
	intel_engine_signal_breadcrumbs(engine);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	intel_engine_set_irq_handler(engine, irq_handler);

	if (GRAPHICS_VER(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 3) {
		engine->irq_enable = gen3_irq_enable;
		engine->irq_disable = gen3_irq_disable;
	} else {
		engine->irq_enable = gen2_irq_enable;
		engine->irq_disable = gen2_irq_disable;
	}
}

static void add_to_engine(struct i915_request *rq)
{
	lockdep_assert_held(&rq->engine->sched_engine->lock);
	list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}

static void remove_from_engine(struct i915_request *rq)
{
	spin_lock_irq(&rq->engine->sched_engine->lock);
	list_del_init(&rq->sched.link);

	/* Prevent further __await_execution() registering a cb, then flush */
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	spin_unlock_irq(&rq->engine->sched_engine->lock);

	i915_request_notify_execute_cb_imm(rq);
}

static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->sanitize = xcs_sanitize;

	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->add_active_request = add_to_engine;
	engine->remove_active_request = remove_from_engine;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
	if (GRAPHICS_VER(i915) == 5)
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (GRAPHICS_VER(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (GRAPHICS_VER(i915) >= 4)
		engine->emit_bb_start = gen4_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = gen3_emit_bb_start;
}

static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) >= 7) {
		engine->emit_flush = gen7_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 6) {
		engine->emit_flush = gen6_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 5) {
		engine->emit_flush = gen4_emit_flush_rcs;
	} else {
		if (GRAPHICS_VER(i915) < 4)
			engine->emit_flush = gen2_emit_flush;
		else
			engine->emit_flush = gen4_emit_flush_rcs;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (GRAPHICS_VER(i915) == 6)
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_emit_flush_vcs;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (GRAPHICS_VER(i915) == 6)
			engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
		else
			engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
	} else {
		engine->emit_flush = gen4_emit_flush_vcs;
		if (GRAPHICS_VER(i915) == 5)
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}

static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) == 6)
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
	else
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}

static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
				   struct i915_gem_ww_ctx *ww,
				   struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size, err;

	if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
		return NULL;

	err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (err < 0)
		return ERR_PTR(err);
	if (!err)
		return NULL;

	size = ALIGN(err, PAGE_SIZE);

	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return ERR_CAST(vma);
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		vma->private = NULL;
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}
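
/*
 * Final assembly of a legacy ringbuffer engine: install the common and
 * per-class vfuncs, create the global timeline and ring shared by all
 * contexts, pin them (together with the gen7 residuals-clearing batch, if
 * any) under a single ww transaction, and only then take ownership of
 * cleanup via engine->release.
 */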
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct i915_gem_ww_ctx ww;
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	struct i915_vma *gen7_wa_vma;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create_from_engine(engine,
						     I915_GEM_HWS_SEQNO_ADDR);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline;
	}

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	gen7_wa_vma = gen7_ctx_vma(engine);
	if (IS_ERR(gen7_wa_vma)) {
		err = PTR_ERR(gen7_wa_vma);
		goto err_ring;
	}

	i915_gem_ww_ctx_init(&ww, false);

retry:
	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
	if (!err && gen7_wa_vma)
		err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
	if (!err)
		err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
	if (!err)
		err = intel_timeline_pin(timeline, &ww);
	if (!err) {
		err = intel_ring_pin(ring, &ww);
		if (err)
			intel_timeline_unpin(timeline);
	}
	if (err)
		goto out;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (gen7_wa_vma) {
		err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
		if (err) {
			intel_ring_unpin(ring);
			intel_timeline_unpin(timeline);
		}
	}

out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (err)
		goto err_gen7_put;

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_gen7_put:
	if (gen7_wa_vma) {
		intel_context_put(gen7_wa_vma->private);
		i915_gem_object_put(gen7_wa_vma->obj);
	}
err_ring:
	intel_ring_put(ring);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif