/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (INTEL_GEN(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (INTEL_GEN(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}

static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}

static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN(engine->i915, 7)) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			/* fallthrough */
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN(engine->i915, 6)) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write(engine->uncore, hwsp, offset);
	intel_uncore_posting_read(engine->uncore, hwsp);
}
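
/*
 * On gen6/gen7 the command streamer caches the status page address in
 * its TLB, so after (re)programming the HWSP we issue a sync flush via
 * RING_INSTPM to invalidate it. The ring is expected to be idle at this
 * point; this is only reached from the resume path below.
 */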
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	drm_WARN_ON(&dev_priv->drm,
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	ENGINE_WRITE(engine, RING_INSTPM,
		     _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					INSTPM_SYNC_FLUSH));
	if (intel_wait_for_register(engine->uncore,
				    RING_INSTPM(engine->mmio_base),
				    INSTPM_SYNC_FLUSH, 0,
				    1000))
		drm_err(&dev_priv->drm,
			"%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
			engine->name);
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(engine->uncore,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			drm_err(&dev_priv->drm,
				"%s : timed out trying to stop ring\n",
				engine->name);

			/*
			 * Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (ENGINE_READ(engine, RING_HEAD) !=
			    ENGINE_READ(engine, RING_TAIL))
				return false;
		}
	}

	ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));

	ENGINE_WRITE(engine, RING_HEAD, 0);
	ENGINE_WRITE(engine, RING_TAIL, 0);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE(engine, RING_CTL, 0);

	return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
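
/*
 * For a GGTT address space, return the aliasing ppgtt that shadows it
 * (NULL if none has been created); a full ppgtt is passed through
 * unchanged. Callers then pin or program the page directory that is
 * actually used for PPGTT accesses, as in __context_pin_ppgtt() below.
 */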
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (vm) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
		ENGINE_WRITE(engine, RING_PP_DIR_BASE,
			     px_base(ppgtt->pd)->ggtt_offset << 10);
	}
}

static int xcs_resume(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->legacy.ring;
	int ret = 0;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		drm_dbg(&dev_priv->drm, "%s head not reset to zero "
			"ctl %08x head %08x tail %08x start %08x\n",
			engine->name,
			ENGINE_READ(engine, RING_CTL),
			ENGINE_READ(engine, RING_HEAD),
			ENGINE_READ(engine, RING_TAIL),
			ENGINE_READ(engine, RING_START));

		if (!stop_ring(engine)) {
			drm_err(&dev_priv->drm,
				"failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ(engine, RING_CTL),
				ENGINE_READ(engine, RING_HEAD),
				ENGINE_READ(engine, RING_TAIL),
				ENGINE_READ(engine, RING_START));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE(engine, RING_HEAD, ring->head);
	ENGINE_WRITE(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(engine->uncore,
				    RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		drm_err(&dev_priv->drm, "%s initialization failed "
			"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			engine->name,
			ENGINE_READ(engine, RING_CTL),
			ENGINE_READ(engine, RING_CTL) & RING_VALID,
			ENGINE_READ(engine, RING_HEAD), ring->head,
			ENGINE_READ(engine, RING_TAIL), ring->tail,
			ENGINE_READ(engine, RING_START),
			i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	if (INTEL_GEN(dev_priv) > 2)
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
out:
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	return ret;
}
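
/*
 * Park the command streamer by hand before triggering the actual GPU
 * reset: stop the cs, fold HEAD onto TAIL and then clear the ring
 * registers, for the reasons spelled out in the comment below.
 */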
static void reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;

	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also, gpus as modern as kbl can suffer
	 * from a system hang if a batchbuffer is progressing when
	 * the reset is issued, regardless of READY_TO_RESET ack.
	 * Thus assume it is best to stop engines on all gens
	 * where we have a gpu reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");

	if (intel_engine_stop_cs(engine))
		ENGINE_TRACE(engine, "timed out on STOP_RING\n");

	intel_uncore_write_fw(uncore,
			      RING_HEAD(base),
			      intel_uncore_read_fw(uncore, RING_TAIL(base)));
	intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */

	intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
	intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
	intel_uncore_posting_read_fw(uncore, RING_TAIL(base));

	/* The ring must be empty before it is disabled */
	intel_uncore_write_fw(uncore, RING_CTL(base), 0);

	/* Check acts as a post */
	if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
		ENGINE_TRACE(engine, "ring head [%x] not parked\n",
			     intel_uncore_read_fw(uncore, RING_HEAD(base)));
}

static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->active.lock, flags);
	list_for_each_entry(pos, &engine->active.requests, sched.link) {
		if (!i915_request_completed(pos)) {
			rq = pos;
			break;
		}
	}

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when they lose the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanisms are
	 * safety valves if client submission ends up resulting in nothing
	 * more than subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}
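
/* Render-engine resume: apply RCS-only workarounds, then take the common xcs path. */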
static int rcs_resume(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;

	/*
	 * Disable CONSTANT_BUFFER before it is loaded from the context
	 * image. As soon as it is loaded, it is executed and the stored
	 * address may no longer be valid, leading to a GPU hang.
	 *
	 * This imposes the requirement that userspace reload their
	 * CONSTANT_BUFFER on every batch, fortunately a requirement
	 * they are already accustomed to from before contexts were
	 * enabled.
	 */
	if (IS_GEN(i915, 4))
		intel_uncore_write(uncore, ECOSKPD,
				   _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));

	if (IS_GEN_RANGE(i915, 6, 7))
		intel_uncore_write(uncore, INSTPM,
				   _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return xcs_resume(engine);
}

static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->active.requests, sched.link) {
		i915_request_set_error_once(request, -EIO);
		i915_request_mark_complete(request);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->active.lock, flags);
}
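
/*
 * Request submission on this backend is a simple tail write: the
 * commands were already copied into the ring during construction, so
 * moving RING_TAIL hands them to the command streamer.
 */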
static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static int __context_pin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;
	int err = 0;

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	if (engine->default_state) {
		void *vaddr;

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_obj;
		}

		shmem_read(engine->default_state, 0,
			   vaddr, engine->context_size);

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);
	}

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
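
/*
 * All contexts share the engine's single legacy ring and timeline; only
 * the (optional) per-context state object is allocated here, seeded from
 * the engine's default state when one has been recorded.
 */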
static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
		if (engine->default_state)
			__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	}

	return 0;
}

static int ring_context_pin(struct intel_context *ce)
{
	return __context_pin_ppgtt(ce);
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.pin = ring_context_pin,
	.unpin = ring_context_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};

static int load_pd_dir(struct i915_request *rq,
		       const struct i915_ppgtt *ppgtt,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}

static inline int mi_set_context(struct i915_request *rq,
				 struct intel_context *ce,
				 u32 flags)
{
	struct drm_i915_private *i915 = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (IS_GEN(i915, 7))
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (IS_GEN(i915, 5))
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (IS_GEN(i915, 7)) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (IS_GEN(i915, 5)) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (IS_GEN(i915, 7)) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (IS_GEN(i915, 5)) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}
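
/*
 * Re-emit the saved L3 remapping registers for one slice from the ring.
 * The emitted sequence is a single MI_LOAD_REGISTER_IMM block,
 * schematically:
 *
 *	MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE / 4)
 *	{ reg(GEN7_L3LOG(slice, i)), remap_info[i] } for each i
 *	MI_NOOP
 */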
static int remap_l3_slice(struct i915_request *rq, int slice)
{
	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE / 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE / 4);
	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
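
/*
 * Apply any outstanding L3 remaps for this request's gem context, one
 * slice at a time, then clear the pending mask so the work is done only
 * once per context.
 */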
static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
	 */
	ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    engine->wa_ctx.vma->node.start, 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}
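
/*
 * Per-request preamble for the legacy ring: reserve space up front for
 * the rest of request construction, unconditionally invalidate caches,
 * and perform any mm/context switch before the caller starts emitting
 * its own payload.
 */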
static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/*
	 * Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/*
	 * Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
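
/*
 * The PSMI dance above is only needed for the gen6 BSD ring; every other
 * legacy ring uses the plain tail write installed here.
 */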
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;

	engine->park = NULL;
	engine->unpark = NULL;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	i9xx_set_default_submission(engine);
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (INTEL_GEN(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (INTEL_GEN(i915) >= 3) {
		engine->irq_enable = gen3_irq_enable;
		engine->irq_disable = gen3_irq_disable;
	} else {
		engine->irq_enable = gen2_irq_enable;
		engine->irq_disable = gen2_irq_disable;
	}
}

static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(INTEL_GEN(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb, so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
	if (IS_GEN(i915, 5))
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(i915) >= 4)
		engine->emit_bb_start = gen4_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = gen3_emit_bb_start;
}
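
/*
 * Per-class setup below layers engine-specific flush, breadcrumb and
 * interrupt handling on top of the defaults chosen in setup_common().
 */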
static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(i915) >= 7) {
		engine->emit_flush = gen7_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
	} else if (IS_GEN(i915, 6)) {
		engine->emit_flush = gen6_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
	} else if (IS_GEN(i915, 5)) {
		engine->emit_flush = gen4_emit_flush_rcs;
	} else {
		if (INTEL_GEN(i915) < 4)
			engine->emit_flush = gen2_emit_flush;
		else
			engine->emit_flush = gen4_emit_flush_rcs;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->resume = rcs_resume;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN(i915, 6))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_emit_flush_vcs;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (IS_GEN(i915, 6))
			engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
		else
			engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
	} else {
		engine->emit_flush = gen4_emit_flush_vcs;
		if (IS_GEN(i915, 5))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}

static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (IS_GEN(i915, 6))
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
	else
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(INTEL_GEN(i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}

static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size;
	int err;

	size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (size <= 0)
		return size;

	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		goto err_private;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err_private:
	intel_context_put(vma->private);
err_obj:
	i915_gem_object_put(obj);
	return err;
}
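
/*
 * Engine construction for the ring submission backend: pick the class
 * vfuncs, create the shared timeline and 16k ring backed by the status
 * page, and (for Haswell render) install the residuals-clearing batch.
 */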
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	err = intel_timeline_pin(timeline);
	if (err)
		goto err_timeline;

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline_unpin;
	}

	err = intel_ring_pin(ring);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
		err = gen7_ctx_switch_bb_init(engine);
		if (err)
			goto err_ring_unpin;
	}

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_ring_unpin:
	intel_ring_unpin(ring);
err_ring:
	intel_ring_put(ring);
err_timeline_unpin:
	intel_timeline_unpin(timeline);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif