1 /* 2 * Copyright © 2008-2010 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Zou Nan hai <nanhai.zou@intel.com> 26 * Xiang Hai hao<haihao.xiang@intel.com> 27 * 28 */ 29 30 #include "gen2_engine_cs.h" 31 #include "gen6_engine_cs.h" 32 #include "gen6_ppgtt.h" 33 #include "gen7_renderclear.h" 34 #include "i915_drv.h" 35 #include "intel_breadcrumbs.h" 36 #include "intel_context.h" 37 #include "intel_gt.h" 38 #include "intel_reset.h" 39 #include "intel_ring.h" 40 #include "shmem_utils.h" 41 42 /* Rough estimate of the typical request size, performing a flush, 43 * set-context and then emitting the batch. 44 */ 45 #define LEGACY_REQUEST_SIZE 200 46 47 static void set_hwstam(struct intel_engine_cs *engine, u32 mask) 48 { 49 /* 50 * Keep the render interrupt unmasked as this papers over 51 * lost interrupts following a reset. 52 */ 53 if (engine->class == RENDER_CLASS) { 54 if (INTEL_GEN(engine->i915) >= 6) 55 mask &= ~BIT(0); 56 else 57 mask &= ~I915_USER_INTERRUPT; 58 } 59 60 intel_engine_set_hwsp_writemask(engine, mask); 61 } 62 63 static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys) 64 { 65 u32 addr; 66 67 addr = lower_32_bits(phys); 68 if (INTEL_GEN(engine->i915) >= 4) 69 addr |= (phys >> 28) & 0xf0; 70 71 intel_uncore_write(engine->uncore, HWS_PGA, addr); 72 } 73 74 static struct page *status_page(struct intel_engine_cs *engine) 75 { 76 struct drm_i915_gem_object *obj = engine->status_page.vma->obj; 77 78 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); 79 return sg_page(obj->mm.pages->sgl); 80 } 81 82 static void ring_setup_phys_status_page(struct intel_engine_cs *engine) 83 { 84 set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine)))); 85 set_hwstam(engine, ~0u); 86 } 87 88 static void set_hwsp(struct intel_engine_cs *engine, u32 offset) 89 { 90 i915_reg_t hwsp; 91 92 /* 93 * The ring status page addresses are no longer next to the rest of 94 * the ring registers as of gen7. 95 */ 96 if (IS_GEN(engine->i915, 7)) { 97 switch (engine->id) { 98 /* 99 * No more rings exist on Gen7. Default case is only to shut up 100 * gcc switch check warning. 101 */ 102 default: 103 GEM_BUG_ON(engine->id); 104 /* fallthrough */ 105 case RCS0: 106 hwsp = RENDER_HWS_PGA_GEN7; 107 break; 108 case BCS0: 109 hwsp = BLT_HWS_PGA_GEN7; 110 break; 111 case VCS0: 112 hwsp = BSD_HWS_PGA_GEN7; 113 break; 114 case VECS0: 115 hwsp = VEBOX_HWS_PGA_GEN7; 116 break; 117 } 118 } else if (IS_GEN(engine->i915, 6)) { 119 hwsp = RING_HWS_PGA_GEN6(engine->mmio_base); 120 } else { 121 hwsp = RING_HWS_PGA(engine->mmio_base); 122 } 123 124 intel_uncore_write(engine->uncore, hwsp, offset); 125 intel_uncore_posting_read(engine->uncore, hwsp); 126 } 127 128 static void flush_cs_tlb(struct intel_engine_cs *engine) 129 { 130 struct drm_i915_private *dev_priv = engine->i915; 131 132 if (!IS_GEN_RANGE(dev_priv, 6, 7)) 133 return; 134 135 /* ring should be idle before issuing a sync flush*/ 136 drm_WARN_ON(&dev_priv->drm, 137 (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); 138 139 ENGINE_WRITE(engine, RING_INSTPM, 140 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 141 INSTPM_SYNC_FLUSH)); 142 if (intel_wait_for_register(engine->uncore, 143 RING_INSTPM(engine->mmio_base), 144 INSTPM_SYNC_FLUSH, 0, 145 1000)) 146 drm_err(&dev_priv->drm, 147 "%s: wait for SyncFlush to complete for TLB invalidation timed out\n", 148 engine->name); 149 } 150 151 static void ring_setup_status_page(struct intel_engine_cs *engine) 152 { 153 set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma)); 154 set_hwstam(engine, ~0u); 155 156 flush_cs_tlb(engine); 157 } 158 159 static bool stop_ring(struct intel_engine_cs *engine) 160 { 161 struct drm_i915_private *dev_priv = engine->i915; 162 163 if (INTEL_GEN(dev_priv) > 2) { 164 ENGINE_WRITE(engine, 165 RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING)); 166 if (intel_wait_for_register(engine->uncore, 167 RING_MI_MODE(engine->mmio_base), 168 MODE_IDLE, 169 MODE_IDLE, 170 1000)) { 171 drm_err(&dev_priv->drm, 172 "%s : timed out trying to stop ring\n", 173 engine->name); 174 175 /* 176 * Sometimes we observe that the idle flag is not 177 * set even though the ring is empty. So double 178 * check before giving up. 179 */ 180 if (ENGINE_READ(engine, RING_HEAD) != 181 ENGINE_READ(engine, RING_TAIL)) 182 return false; 183 } 184 } 185 186 ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL)); 187 188 ENGINE_WRITE(engine, RING_HEAD, 0); 189 ENGINE_WRITE(engine, RING_TAIL, 0); 190 191 /* The ring must be empty before it is disabled */ 192 ENGINE_WRITE(engine, RING_CTL, 0); 193 194 return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0; 195 } 196 197 static struct i915_address_space *vm_alias(struct i915_address_space *vm) 198 { 199 if (i915_is_ggtt(vm)) 200 vm = &i915_vm_to_ggtt(vm)->alias->vm; 201 202 return vm; 203 } 204 205 static void set_pp_dir(struct intel_engine_cs *engine) 206 { 207 struct i915_address_space *vm = vm_alias(engine->gt->vm); 208 209 if (vm) { 210 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 211 212 ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G); 213 ENGINE_WRITE(engine, RING_PP_DIR_BASE, 214 px_base(ppgtt->pd)->ggtt_offset << 10); 215 } 216 } 217 218 static int xcs_resume(struct intel_engine_cs *engine) 219 { 220 struct drm_i915_private *dev_priv = engine->i915; 221 struct intel_ring *ring = engine->legacy.ring; 222 int ret = 0; 223 224 ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n", 225 ring->head, ring->tail); 226 227 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); 228 229 /* WaClearRingBufHeadRegAtInit:ctg,elk */ 230 if (!stop_ring(engine)) { 231 /* G45 ring initialization often fails to reset head to zero */ 232 drm_dbg(&dev_priv->drm, "%s head not reset to zero " 233 "ctl %08x head %08x tail %08x start %08x\n", 234 engine->name, 235 ENGINE_READ(engine, RING_CTL), 236 ENGINE_READ(engine, RING_HEAD), 237 ENGINE_READ(engine, RING_TAIL), 238 ENGINE_READ(engine, RING_START)); 239 240 if (!stop_ring(engine)) { 241 drm_err(&dev_priv->drm, 242 "failed to set %s head to zero " 243 "ctl %08x head %08x tail %08x start %08x\n", 244 engine->name, 245 ENGINE_READ(engine, RING_CTL), 246 ENGINE_READ(engine, RING_HEAD), 247 ENGINE_READ(engine, RING_TAIL), 248 ENGINE_READ(engine, RING_START)); 249 ret = -EIO; 250 goto out; 251 } 252 } 253 254 if (HWS_NEEDS_PHYSICAL(dev_priv)) 255 ring_setup_phys_status_page(engine); 256 else 257 ring_setup_status_page(engine); 258 259 intel_breadcrumbs_reset(engine->breadcrumbs); 260 261 /* Enforce ordering by reading HEAD register back */ 262 ENGINE_POSTING_READ(engine, RING_HEAD); 263 264 /* 265 * Initialize the ring. This must happen _after_ we've cleared the ring 266 * registers with the above sequence (the readback of the HEAD registers 267 * also enforces ordering), otherwise the hw might lose the new ring 268 * register values. 269 */ 270 ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma)); 271 272 /* Check that the ring offsets point within the ring! */ 273 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); 274 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); 275 intel_ring_update_space(ring); 276 277 set_pp_dir(engine); 278 279 /* First wake the ring up to an empty/idle ring */ 280 ENGINE_WRITE(engine, RING_HEAD, ring->head); 281 ENGINE_WRITE(engine, RING_TAIL, ring->head); 282 ENGINE_POSTING_READ(engine, RING_TAIL); 283 284 ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID); 285 286 /* If the head is still not zero, the ring is dead */ 287 if (intel_wait_for_register(engine->uncore, 288 RING_CTL(engine->mmio_base), 289 RING_VALID, RING_VALID, 290 50)) { 291 drm_err(&dev_priv->drm, "%s initialization failed " 292 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n", 293 engine->name, 294 ENGINE_READ(engine, RING_CTL), 295 ENGINE_READ(engine, RING_CTL) & RING_VALID, 296 ENGINE_READ(engine, RING_HEAD), ring->head, 297 ENGINE_READ(engine, RING_TAIL), ring->tail, 298 ENGINE_READ(engine, RING_START), 299 i915_ggtt_offset(ring->vma)); 300 ret = -EIO; 301 goto out; 302 } 303 304 if (INTEL_GEN(dev_priv) > 2) 305 ENGINE_WRITE(engine, 306 RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); 307 308 /* Now awake, let it get started */ 309 if (ring->tail != ring->head) { 310 ENGINE_WRITE(engine, RING_TAIL, ring->tail); 311 ENGINE_POSTING_READ(engine, RING_TAIL); 312 } 313 314 /* Papering over lost _interrupts_ immediately following the restart */ 315 intel_engine_signal_breadcrumbs(engine); 316 out: 317 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); 318 319 return ret; 320 } 321 322 static void reset_prepare(struct intel_engine_cs *engine) 323 { 324 struct intel_uncore *uncore = engine->uncore; 325 const u32 base = engine->mmio_base; 326 327 /* 328 * We stop engines, otherwise we might get failed reset and a 329 * dead gpu (on elk). Also as modern gpu as kbl can suffer 330 * from system hang if batchbuffer is progressing when 331 * the reset is issued, regardless of READY_TO_RESET ack. 332 * Thus assume it is best to stop engines on all gens 333 * where we have a gpu reset. 334 * 335 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) 336 * 337 * WaMediaResetMainRingCleanup:ctg,elk (presumably) 338 * 339 * FIXME: Wa for more modern gens needs to be validated 340 */ 341 ENGINE_TRACE(engine, "\n"); 342 343 if (intel_engine_stop_cs(engine)) 344 ENGINE_TRACE(engine, "timed out on STOP_RING\n"); 345 346 intel_uncore_write_fw(uncore, 347 RING_HEAD(base), 348 intel_uncore_read_fw(uncore, RING_TAIL(base))); 349 intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */ 350 351 intel_uncore_write_fw(uncore, RING_HEAD(base), 0); 352 intel_uncore_write_fw(uncore, RING_TAIL(base), 0); 353 intel_uncore_posting_read_fw(uncore, RING_TAIL(base)); 354 355 /* The ring must be empty before it is disabled */ 356 intel_uncore_write_fw(uncore, RING_CTL(base), 0); 357 358 /* Check acts as a post */ 359 if (intel_uncore_read_fw(uncore, RING_HEAD(base))) 360 ENGINE_TRACE(engine, "ring head [%x] not parked\n", 361 intel_uncore_read_fw(uncore, RING_HEAD(base))); 362 } 363 364 static void reset_rewind(struct intel_engine_cs *engine, bool stalled) 365 { 366 struct i915_request *pos, *rq; 367 unsigned long flags; 368 u32 head; 369 370 rq = NULL; 371 spin_lock_irqsave(&engine->active.lock, flags); 372 list_for_each_entry(pos, &engine->active.requests, sched.link) { 373 if (!i915_request_completed(pos)) { 374 rq = pos; 375 break; 376 } 377 } 378 379 /* 380 * The guilty request will get skipped on a hung engine. 381 * 382 * Users of client default contexts do not rely on logical 383 * state preserved between batches so it is safe to execute 384 * queued requests following the hang. Non default contexts 385 * rely on preserved state, so skipping a batch loses the 386 * evolution of the state and it needs to be considered corrupted. 387 * Executing more queued batches on top of corrupted state is 388 * risky. But we take the risk by trying to advance through 389 * the queued requests in order to make the client behaviour 390 * more predictable around resets, by not throwing away random 391 * amount of batches it has prepared for execution. Sophisticated 392 * clients can use gem_reset_stats_ioctl and dma fence status 393 * (exported via sync_file info ioctl on explicit fences) to observe 394 * when it loses the context state and should rebuild accordingly. 395 * 396 * The context ban, and ultimately the client ban, mechanism are safety 397 * valves if client submission ends up resulting in nothing more than 398 * subsequent hangs. 399 */ 400 401 if (rq) { 402 /* 403 * Try to restore the logical GPU state to match the 404 * continuation of the request queue. If we skip the 405 * context/PD restore, then the next request may try to execute 406 * assuming that its context is valid and loaded on the GPU and 407 * so may try to access invalid memory, prompting repeated GPU 408 * hangs. 409 * 410 * If the request was guilty, we still restore the logical 411 * state in case the next request requires it (e.g. the 412 * aliasing ppgtt), but skip over the hung batch. 413 * 414 * If the request was innocent, we try to replay the request 415 * with the restored context. 416 */ 417 __i915_request_reset(rq, stalled); 418 419 GEM_BUG_ON(rq->ring != engine->legacy.ring); 420 head = rq->head; 421 } else { 422 head = engine->legacy.ring->tail; 423 } 424 engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head); 425 426 spin_unlock_irqrestore(&engine->active.lock, flags); 427 } 428 429 static void reset_finish(struct intel_engine_cs *engine) 430 { 431 } 432 433 static void reset_cancel(struct intel_engine_cs *engine) 434 { 435 struct i915_request *request; 436 unsigned long flags; 437 438 spin_lock_irqsave(&engine->active.lock, flags); 439 440 /* Mark all submitted requests as skipped. */ 441 list_for_each_entry(request, &engine->active.requests, sched.link) { 442 i915_request_set_error_once(request, -EIO); 443 i915_request_mark_complete(request); 444 } 445 446 /* Remaining _unready_ requests will be nop'ed when submitted */ 447 448 spin_unlock_irqrestore(&engine->active.lock, flags); 449 } 450 451 static void i9xx_submit_request(struct i915_request *request) 452 { 453 i915_request_submit(request); 454 wmb(); /* paranoid flush writes out of the WCB before mmio */ 455 456 ENGINE_WRITE(request->engine, RING_TAIL, 457 intel_ring_set_tail(request->ring, request->tail)); 458 } 459 460 static void __ring_context_fini(struct intel_context *ce) 461 { 462 i915_vma_put(ce->state); 463 } 464 465 static void ring_context_destroy(struct kref *ref) 466 { 467 struct intel_context *ce = container_of(ref, typeof(*ce), ref); 468 469 GEM_BUG_ON(intel_context_is_pinned(ce)); 470 471 if (ce->state) 472 __ring_context_fini(ce); 473 474 intel_context_fini(ce); 475 intel_context_free(ce); 476 } 477 478 static int __context_pin_ppgtt(struct intel_context *ce) 479 { 480 struct i915_address_space *vm; 481 int err = 0; 482 483 vm = vm_alias(ce->vm); 484 if (vm) 485 err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm))); 486 487 return err; 488 } 489 490 static void __context_unpin_ppgtt(struct intel_context *ce) 491 { 492 struct i915_address_space *vm; 493 494 vm = vm_alias(ce->vm); 495 if (vm) 496 gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm)); 497 } 498 499 static void ring_context_unpin(struct intel_context *ce) 500 { 501 __context_unpin_ppgtt(ce); 502 } 503 504 static struct i915_vma * 505 alloc_context_vma(struct intel_engine_cs *engine) 506 { 507 struct drm_i915_private *i915 = engine->i915; 508 struct drm_i915_gem_object *obj; 509 struct i915_vma *vma; 510 int err; 511 512 obj = i915_gem_object_create_shmem(i915, engine->context_size); 513 if (IS_ERR(obj)) 514 return ERR_CAST(obj); 515 516 /* 517 * Try to make the context utilize L3 as well as LLC. 518 * 519 * On VLV we don't have L3 controls in the PTEs so we 520 * shouldn't touch the cache level, especially as that 521 * would make the object snooped which might have a 522 * negative performance impact. 523 * 524 * Snooping is required on non-llc platforms in execlist 525 * mode, but since all GGTT accesses use PAT entry 0 we 526 * get snooping anyway regardless of cache_level. 527 * 528 * This is only applicable for Ivy Bridge devices since 529 * later platforms don't have L3 control bits in the PTE. 530 */ 531 if (IS_IVYBRIDGE(i915)) 532 i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC); 533 534 if (engine->default_state) { 535 void *vaddr; 536 537 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); 538 if (IS_ERR(vaddr)) { 539 err = PTR_ERR(vaddr); 540 goto err_obj; 541 } 542 543 shmem_read(engine->default_state, 0, 544 vaddr, engine->context_size); 545 546 i915_gem_object_flush_map(obj); 547 __i915_gem_object_release_map(obj); 548 } 549 550 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); 551 if (IS_ERR(vma)) { 552 err = PTR_ERR(vma); 553 goto err_obj; 554 } 555 556 return vma; 557 558 err_obj: 559 i915_gem_object_put(obj); 560 return ERR_PTR(err); 561 } 562 563 static int ring_context_alloc(struct intel_context *ce) 564 { 565 struct intel_engine_cs *engine = ce->engine; 566 567 /* One ringbuffer to rule them all */ 568 GEM_BUG_ON(!engine->legacy.ring); 569 ce->ring = engine->legacy.ring; 570 ce->timeline = intel_timeline_get(engine->legacy.timeline); 571 572 GEM_BUG_ON(ce->state); 573 if (engine->context_size) { 574 struct i915_vma *vma; 575 576 vma = alloc_context_vma(engine); 577 if (IS_ERR(vma)) 578 return PTR_ERR(vma); 579 580 ce->state = vma; 581 if (engine->default_state) 582 __set_bit(CONTEXT_VALID_BIT, &ce->flags); 583 } 584 585 return 0; 586 } 587 588 static int ring_context_pin(struct intel_context *ce) 589 { 590 return __context_pin_ppgtt(ce); 591 } 592 593 static void ring_context_reset(struct intel_context *ce) 594 { 595 intel_ring_reset(ce->ring, ce->ring->emit); 596 } 597 598 static const struct intel_context_ops ring_context_ops = { 599 .alloc = ring_context_alloc, 600 601 .pin = ring_context_pin, 602 .unpin = ring_context_unpin, 603 604 .enter = intel_context_enter_engine, 605 .exit = intel_context_exit_engine, 606 607 .reset = ring_context_reset, 608 .destroy = ring_context_destroy, 609 }; 610 611 static int load_pd_dir(struct i915_request *rq, 612 const struct i915_ppgtt *ppgtt, 613 u32 valid) 614 { 615 const struct intel_engine_cs * const engine = rq->engine; 616 u32 *cs; 617 618 cs = intel_ring_begin(rq, 12); 619 if (IS_ERR(cs)) 620 return PTR_ERR(cs); 621 622 *cs++ = MI_LOAD_REGISTER_IMM(1); 623 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base)); 624 *cs++ = valid; 625 626 *cs++ = MI_LOAD_REGISTER_IMM(1); 627 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); 628 *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10; 629 630 /* Stall until the page table load is complete? */ 631 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; 632 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); 633 *cs++ = intel_gt_scratch_offset(engine->gt, 634 INTEL_GT_SCRATCH_FIELD_DEFAULT); 635 636 *cs++ = MI_LOAD_REGISTER_IMM(1); 637 *cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base)); 638 *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE); 639 640 intel_ring_advance(rq, cs); 641 642 return rq->engine->emit_flush(rq, EMIT_FLUSH); 643 } 644 645 static inline int mi_set_context(struct i915_request *rq, 646 struct intel_context *ce, 647 u32 flags) 648 { 649 struct intel_engine_cs *engine = rq->engine; 650 struct drm_i915_private *i915 = engine->i915; 651 enum intel_engine_id id; 652 const int num_engines = 653 IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0; 654 bool force_restore = false; 655 int len; 656 u32 *cs; 657 658 len = 4; 659 if (IS_GEN(i915, 7)) 660 len += 2 + (num_engines ? 4 * num_engines + 6 : 0); 661 else if (IS_GEN(i915, 5)) 662 len += 2; 663 if (flags & MI_FORCE_RESTORE) { 664 GEM_BUG_ON(flags & MI_RESTORE_INHIBIT); 665 flags &= ~MI_FORCE_RESTORE; 666 force_restore = true; 667 len += 2; 668 } 669 670 cs = intel_ring_begin(rq, len); 671 if (IS_ERR(cs)) 672 return PTR_ERR(cs); 673 674 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ 675 if (IS_GEN(i915, 7)) { 676 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; 677 if (num_engines) { 678 struct intel_engine_cs *signaller; 679 680 *cs++ = MI_LOAD_REGISTER_IMM(num_engines); 681 for_each_engine(signaller, engine->gt, id) { 682 if (signaller == engine) 683 continue; 684 685 *cs++ = i915_mmio_reg_offset( 686 RING_PSMI_CTL(signaller->mmio_base)); 687 *cs++ = _MASKED_BIT_ENABLE( 688 GEN6_PSMI_SLEEP_MSG_DISABLE); 689 } 690 } 691 } else if (IS_GEN(i915, 5)) { 692 /* 693 * This w/a is only listed for pre-production ilk a/b steppings, 694 * but is also mentioned for programming the powerctx. To be 695 * safe, just apply the workaround; we do not use SyncFlush so 696 * this should never take effect and so be a no-op! 697 */ 698 *cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN; 699 } 700 701 if (force_restore) { 702 /* 703 * The HW doesn't handle being told to restore the current 704 * context very well. Quite often it likes goes to go off and 705 * sulk, especially when it is meant to be reloading PP_DIR. 706 * A very simple fix to force the reload is to simply switch 707 * away from the current context and back again. 708 * 709 * Note that the kernel_context will contain random state 710 * following the INHIBIT_RESTORE. We accept this since we 711 * never use the kernel_context state; it is merely a 712 * placeholder we use to flush other contexts. 713 */ 714 *cs++ = MI_SET_CONTEXT; 715 *cs++ = i915_ggtt_offset(engine->kernel_context->state) | 716 MI_MM_SPACE_GTT | 717 MI_RESTORE_INHIBIT; 718 } 719 720 *cs++ = MI_NOOP; 721 *cs++ = MI_SET_CONTEXT; 722 *cs++ = i915_ggtt_offset(ce->state) | flags; 723 /* 724 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP 725 * WaMiSetContext_Hang:snb,ivb,vlv 726 */ 727 *cs++ = MI_NOOP; 728 729 if (IS_GEN(i915, 7)) { 730 if (num_engines) { 731 struct intel_engine_cs *signaller; 732 i915_reg_t last_reg = {}; /* keep gcc quiet */ 733 734 *cs++ = MI_LOAD_REGISTER_IMM(num_engines); 735 for_each_engine(signaller, engine->gt, id) { 736 if (signaller == engine) 737 continue; 738 739 last_reg = RING_PSMI_CTL(signaller->mmio_base); 740 *cs++ = i915_mmio_reg_offset(last_reg); 741 *cs++ = _MASKED_BIT_DISABLE( 742 GEN6_PSMI_SLEEP_MSG_DISABLE); 743 } 744 745 /* Insert a delay before the next switch! */ 746 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; 747 *cs++ = i915_mmio_reg_offset(last_reg); 748 *cs++ = intel_gt_scratch_offset(engine->gt, 749 INTEL_GT_SCRATCH_FIELD_DEFAULT); 750 *cs++ = MI_NOOP; 751 } 752 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; 753 } else if (IS_GEN(i915, 5)) { 754 *cs++ = MI_SUSPEND_FLUSH; 755 } 756 757 intel_ring_advance(rq, cs); 758 759 return 0; 760 } 761 762 static int remap_l3_slice(struct i915_request *rq, int slice) 763 { 764 u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice]; 765 int i; 766 767 if (!remap_info) 768 return 0; 769 770 cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2); 771 if (IS_ERR(cs)) 772 return PTR_ERR(cs); 773 774 /* 775 * Note: We do not worry about the concurrent register cacheline hang 776 * here because no other code should access these registers other than 777 * at initialization time. 778 */ 779 *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4); 780 for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) { 781 *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i)); 782 *cs++ = remap_info[i]; 783 } 784 *cs++ = MI_NOOP; 785 intel_ring_advance(rq, cs); 786 787 return 0; 788 } 789 790 static int remap_l3(struct i915_request *rq) 791 { 792 struct i915_gem_context *ctx = i915_request_gem_context(rq); 793 int i, err; 794 795 if (!ctx || !ctx->remap_slice) 796 return 0; 797 798 for (i = 0; i < MAX_L3_SLICES; i++) { 799 if (!(ctx->remap_slice & BIT(i))) 800 continue; 801 802 err = remap_l3_slice(rq, i); 803 if (err) 804 return err; 805 } 806 807 ctx->remap_slice = 0; 808 return 0; 809 } 810 811 static int switch_mm(struct i915_request *rq, struct i915_address_space *vm) 812 { 813 int ret; 814 815 if (!vm) 816 return 0; 817 818 ret = rq->engine->emit_flush(rq, EMIT_FLUSH); 819 if (ret) 820 return ret; 821 822 /* 823 * Not only do we need a full barrier (post-sync write) after 824 * invalidating the TLBs, but we need to wait a little bit 825 * longer. Whether this is merely delaying us, or the 826 * subsequent flush is a key part of serialising with the 827 * post-sync op, this extra pass appears vital before a 828 * mm switch! 829 */ 830 ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G); 831 if (ret) 832 return ret; 833 834 return rq->engine->emit_flush(rq, EMIT_INVALIDATE); 835 } 836 837 static int clear_residuals(struct i915_request *rq) 838 { 839 struct intel_engine_cs *engine = rq->engine; 840 int ret; 841 842 ret = switch_mm(rq, vm_alias(engine->kernel_context->vm)); 843 if (ret) 844 return ret; 845 846 if (engine->kernel_context->state) { 847 ret = mi_set_context(rq, 848 engine->kernel_context, 849 MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT); 850 if (ret) 851 return ret; 852 } 853 854 ret = engine->emit_bb_start(rq, 855 engine->wa_ctx.vma->node.start, 0, 856 0); 857 if (ret) 858 return ret; 859 860 ret = engine->emit_flush(rq, EMIT_FLUSH); 861 if (ret) 862 return ret; 863 864 /* Always invalidate before the next switch_mm() */ 865 return engine->emit_flush(rq, EMIT_INVALIDATE); 866 } 867 868 static int switch_context(struct i915_request *rq) 869 { 870 struct intel_engine_cs *engine = rq->engine; 871 struct intel_context *ce = rq->context; 872 void **residuals = NULL; 873 int ret; 874 875 GEM_BUG_ON(HAS_EXECLISTS(engine->i915)); 876 877 if (engine->wa_ctx.vma && ce != engine->kernel_context) { 878 if (engine->wa_ctx.vma->private != ce) { 879 ret = clear_residuals(rq); 880 if (ret) 881 return ret; 882 883 residuals = &engine->wa_ctx.vma->private; 884 } 885 } 886 887 ret = switch_mm(rq, vm_alias(ce->vm)); 888 if (ret) 889 return ret; 890 891 if (ce->state) { 892 u32 flags; 893 894 GEM_BUG_ON(engine->id != RCS0); 895 896 /* For resource streamer on HSW+ and power context elsewhere */ 897 BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN); 898 BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN); 899 900 flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT; 901 if (test_bit(CONTEXT_VALID_BIT, &ce->flags)) 902 flags |= MI_RESTORE_EXT_STATE_EN; 903 else 904 flags |= MI_RESTORE_INHIBIT; 905 906 ret = mi_set_context(rq, ce, flags); 907 if (ret) 908 return ret; 909 } 910 911 ret = remap_l3(rq); 912 if (ret) 913 return ret; 914 915 /* 916 * Now past the point of no return, this request _will_ be emitted. 917 * 918 * Or at least this preamble will be emitted, the request may be 919 * interrupted prior to submitting the user payload. If so, we 920 * still submit the "empty" request in order to preserve global 921 * state tracking such as this, our tracking of the current 922 * dirty context. 923 */ 924 if (residuals) { 925 intel_context_put(*residuals); 926 *residuals = intel_context_get(ce); 927 } 928 929 return 0; 930 } 931 932 static int ring_request_alloc(struct i915_request *request) 933 { 934 int ret; 935 936 GEM_BUG_ON(!intel_context_is_pinned(request->context)); 937 GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb); 938 939 /* 940 * Flush enough space to reduce the likelihood of waiting after 941 * we start building the request - in which case we will just 942 * have to repeat work. 943 */ 944 request->reserved_space += LEGACY_REQUEST_SIZE; 945 946 /* Unconditionally invalidate GPU caches and TLBs. */ 947 ret = request->engine->emit_flush(request, EMIT_INVALIDATE); 948 if (ret) 949 return ret; 950 951 ret = switch_context(request); 952 if (ret) 953 return ret; 954 955 request->reserved_space -= LEGACY_REQUEST_SIZE; 956 return 0; 957 } 958 959 static void gen6_bsd_submit_request(struct i915_request *request) 960 { 961 struct intel_uncore *uncore = request->engine->uncore; 962 963 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 964 965 /* Every tail move must follow the sequence below */ 966 967 /* Disable notification that the ring is IDLE. The GT 968 * will then assume that it is busy and bring it out of rc6. 969 */ 970 intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, 971 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 972 973 /* Clear the context id. Here be magic! */ 974 intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0); 975 976 /* Wait for the ring not to be idle, i.e. for it to wake up. */ 977 if (__intel_wait_for_register_fw(uncore, 978 GEN6_BSD_SLEEP_PSMI_CONTROL, 979 GEN6_BSD_SLEEP_INDICATOR, 980 0, 981 1000, 0, NULL)) 982 drm_err(&uncore->i915->drm, 983 "timed out waiting for the BSD ring to wake up\n"); 984 985 /* Now that the ring is fully powered up, update the tail */ 986 i9xx_submit_request(request); 987 988 /* Let the ring send IDLE messages to the GT again, 989 * and so let it sleep to conserve power when idle. 990 */ 991 intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, 992 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 993 994 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 995 } 996 997 static void i9xx_set_default_submission(struct intel_engine_cs *engine) 998 { 999 engine->submit_request = i9xx_submit_request; 1000 1001 engine->park = NULL; 1002 engine->unpark = NULL; 1003 } 1004 1005 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine) 1006 { 1007 i9xx_set_default_submission(engine); 1008 engine->submit_request = gen6_bsd_submit_request; 1009 } 1010 1011 static void ring_release(struct intel_engine_cs *engine) 1012 { 1013 struct drm_i915_private *dev_priv = engine->i915; 1014 1015 drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 && 1016 (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); 1017 1018 intel_engine_cleanup_common(engine); 1019 1020 if (engine->wa_ctx.vma) { 1021 intel_context_put(engine->wa_ctx.vma->private); 1022 i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); 1023 } 1024 1025 intel_ring_unpin(engine->legacy.ring); 1026 intel_ring_put(engine->legacy.ring); 1027 1028 intel_timeline_unpin(engine->legacy.timeline); 1029 intel_timeline_put(engine->legacy.timeline); 1030 } 1031 1032 static void setup_irq(struct intel_engine_cs *engine) 1033 { 1034 struct drm_i915_private *i915 = engine->i915; 1035 1036 if (INTEL_GEN(i915) >= 6) { 1037 engine->irq_enable = gen6_irq_enable; 1038 engine->irq_disable = gen6_irq_disable; 1039 } else if (INTEL_GEN(i915) >= 5) { 1040 engine->irq_enable = gen5_irq_enable; 1041 engine->irq_disable = gen5_irq_disable; 1042 } else if (INTEL_GEN(i915) >= 3) { 1043 engine->irq_enable = gen3_irq_enable; 1044 engine->irq_disable = gen3_irq_disable; 1045 } else { 1046 engine->irq_enable = gen2_irq_enable; 1047 engine->irq_disable = gen2_irq_disable; 1048 } 1049 } 1050 1051 static void setup_common(struct intel_engine_cs *engine) 1052 { 1053 struct drm_i915_private *i915 = engine->i915; 1054 1055 /* gen8+ are only supported with execlists */ 1056 GEM_BUG_ON(INTEL_GEN(i915) >= 8); 1057 1058 setup_irq(engine); 1059 1060 engine->resume = xcs_resume; 1061 engine->reset.prepare = reset_prepare; 1062 engine->reset.rewind = reset_rewind; 1063 engine->reset.cancel = reset_cancel; 1064 engine->reset.finish = reset_finish; 1065 1066 engine->cops = &ring_context_ops; 1067 engine->request_alloc = ring_request_alloc; 1068 1069 /* 1070 * Using a global execution timeline; the previous final breadcrumb is 1071 * equivalent to our next initial bread so we can elide 1072 * engine->emit_init_breadcrumb(). 1073 */ 1074 engine->emit_fini_breadcrumb = gen3_emit_breadcrumb; 1075 if (IS_GEN(i915, 5)) 1076 engine->emit_fini_breadcrumb = gen5_emit_breadcrumb; 1077 1078 engine->set_default_submission = i9xx_set_default_submission; 1079 1080 if (INTEL_GEN(i915) >= 6) 1081 engine->emit_bb_start = gen6_emit_bb_start; 1082 else if (INTEL_GEN(i915) >= 4) 1083 engine->emit_bb_start = gen4_emit_bb_start; 1084 else if (IS_I830(i915) || IS_I845G(i915)) 1085 engine->emit_bb_start = i830_emit_bb_start; 1086 else 1087 engine->emit_bb_start = gen3_emit_bb_start; 1088 } 1089 1090 static void setup_rcs(struct intel_engine_cs *engine) 1091 { 1092 struct drm_i915_private *i915 = engine->i915; 1093 1094 if (HAS_L3_DPF(i915)) 1095 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 1096 1097 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 1098 1099 if (INTEL_GEN(i915) >= 7) { 1100 engine->emit_flush = gen7_emit_flush_rcs; 1101 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs; 1102 } else if (IS_GEN(i915, 6)) { 1103 engine->emit_flush = gen6_emit_flush_rcs; 1104 engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs; 1105 } else if (IS_GEN(i915, 5)) { 1106 engine->emit_flush = gen4_emit_flush_rcs; 1107 } else { 1108 if (INTEL_GEN(i915) < 4) 1109 engine->emit_flush = gen2_emit_flush; 1110 else 1111 engine->emit_flush = gen4_emit_flush_rcs; 1112 engine->irq_enable_mask = I915_USER_INTERRUPT; 1113 } 1114 1115 if (IS_HASWELL(i915)) 1116 engine->emit_bb_start = hsw_emit_bb_start; 1117 } 1118 1119 static void setup_vcs(struct intel_engine_cs *engine) 1120 { 1121 struct drm_i915_private *i915 = engine->i915; 1122 1123 if (INTEL_GEN(i915) >= 6) { 1124 /* gen6 bsd needs a special wa for tail updates */ 1125 if (IS_GEN(i915, 6)) 1126 engine->set_default_submission = gen6_bsd_set_default_submission; 1127 engine->emit_flush = gen6_emit_flush_vcs; 1128 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; 1129 1130 if (IS_GEN(i915, 6)) 1131 engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs; 1132 else 1133 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs; 1134 } else { 1135 engine->emit_flush = gen4_emit_flush_vcs; 1136 if (IS_GEN(i915, 5)) 1137 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 1138 else 1139 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; 1140 } 1141 } 1142 1143 static void setup_bcs(struct intel_engine_cs *engine) 1144 { 1145 struct drm_i915_private *i915 = engine->i915; 1146 1147 engine->emit_flush = gen6_emit_flush_xcs; 1148 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; 1149 1150 if (IS_GEN(i915, 6)) 1151 engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs; 1152 else 1153 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs; 1154 } 1155 1156 static void setup_vecs(struct intel_engine_cs *engine) 1157 { 1158 struct drm_i915_private *i915 = engine->i915; 1159 1160 GEM_BUG_ON(INTEL_GEN(i915) < 7); 1161 1162 engine->emit_flush = gen6_emit_flush_xcs; 1163 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 1164 engine->irq_enable = hsw_irq_enable_vecs; 1165 engine->irq_disable = hsw_irq_disable_vecs; 1166 1167 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs; 1168 } 1169 1170 static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine, 1171 struct i915_vma * const vma) 1172 { 1173 return gen7_setup_clear_gpr_bb(engine, vma); 1174 } 1175 1176 static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine) 1177 { 1178 struct drm_i915_gem_object *obj; 1179 struct i915_vma *vma; 1180 int size; 1181 int err; 1182 1183 size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */); 1184 if (size <= 0) 1185 return size; 1186 1187 size = ALIGN(size, PAGE_SIZE); 1188 obj = i915_gem_object_create_internal(engine->i915, size); 1189 if (IS_ERR(obj)) 1190 return PTR_ERR(obj); 1191 1192 vma = i915_vma_instance(obj, engine->gt->vm, NULL); 1193 if (IS_ERR(vma)) { 1194 err = PTR_ERR(vma); 1195 goto err_obj; 1196 } 1197 1198 vma->private = intel_context_create(engine); /* dummy residuals */ 1199 if (IS_ERR(vma->private)) { 1200 err = PTR_ERR(vma->private); 1201 goto err_obj; 1202 } 1203 1204 err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH); 1205 if (err) 1206 goto err_private; 1207 1208 err = i915_vma_sync(vma); 1209 if (err) 1210 goto err_unpin; 1211 1212 err = gen7_ctx_switch_bb_setup(engine, vma); 1213 if (err) 1214 goto err_unpin; 1215 1216 engine->wa_ctx.vma = vma; 1217 return 0; 1218 1219 err_unpin: 1220 i915_vma_unpin(vma); 1221 err_private: 1222 intel_context_put(vma->private); 1223 err_obj: 1224 i915_gem_object_put(obj); 1225 return err; 1226 } 1227 1228 int intel_ring_submission_setup(struct intel_engine_cs *engine) 1229 { 1230 struct intel_timeline *timeline; 1231 struct intel_ring *ring; 1232 int err; 1233 1234 setup_common(engine); 1235 1236 switch (engine->class) { 1237 case RENDER_CLASS: 1238 setup_rcs(engine); 1239 break; 1240 case VIDEO_DECODE_CLASS: 1241 setup_vcs(engine); 1242 break; 1243 case COPY_ENGINE_CLASS: 1244 setup_bcs(engine); 1245 break; 1246 case VIDEO_ENHANCEMENT_CLASS: 1247 setup_vecs(engine); 1248 break; 1249 default: 1250 MISSING_CASE(engine->class); 1251 return -ENODEV; 1252 } 1253 1254 timeline = intel_timeline_create_from_engine(engine, 1255 I915_GEM_HWS_SEQNO_ADDR); 1256 if (IS_ERR(timeline)) { 1257 err = PTR_ERR(timeline); 1258 goto err; 1259 } 1260 GEM_BUG_ON(timeline->has_initial_breadcrumb); 1261 1262 err = intel_timeline_pin(timeline); 1263 if (err) 1264 goto err_timeline; 1265 1266 ring = intel_engine_create_ring(engine, SZ_16K); 1267 if (IS_ERR(ring)) { 1268 err = PTR_ERR(ring); 1269 goto err_timeline_unpin; 1270 } 1271 1272 err = intel_ring_pin(ring); 1273 if (err) 1274 goto err_ring; 1275 1276 GEM_BUG_ON(engine->legacy.ring); 1277 engine->legacy.ring = ring; 1278 engine->legacy.timeline = timeline; 1279 1280 GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma); 1281 1282 if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) { 1283 err = gen7_ctx_switch_bb_init(engine); 1284 if (err) 1285 goto err_ring_unpin; 1286 } 1287 1288 /* Finally, take ownership and responsibility for cleanup! */ 1289 engine->release = ring_release; 1290 1291 return 0; 1292 1293 err_ring_unpin: 1294 intel_ring_unpin(ring); 1295 err_ring: 1296 intel_ring_put(ring); 1297 err_timeline_unpin: 1298 intel_timeline_unpin(timeline); 1299 err_timeline: 1300 intel_timeline_put(timeline); 1301 err: 1302 intel_engine_cleanup_common(engine); 1303 return err; 1304 } 1305 1306 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 1307 #include "selftest_ring_submission.c" 1308 #endif 1309