// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
#include "intel_ring.h"

int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	bool vf_flush_wa = false, dc_flush_wa = false;
	u32 *cs, flags = 0;
	int len;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}

	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		/*
		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
		 * pipe control.
		 */
		if (GRAPHICS_VER(rq->i915) == 9)
			vf_flush_wa = true;

		/* WaForGAMHang:kbl */
		if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
			dc_flush_wa = true;
	}

	len = 6;

	if (vf_flush_wa)
		len += 6;

	if (dc_flush_wa)
		len += 12;

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (vf_flush_wa)
		cs = gen8_emit_pipe_control(cs, 0, 0);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
					    0);

	cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);

	intel_ring_advance(rq, cs);

	return 0;
}

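/*
 * Flush for the non-render engines: a single MI_FLUSH_DW with a post-sync
 * dword write to the per-context PPHWSP scratch slot. TLB (and, on the video
 * decode engines, BSD) invalidation is requested through the same command
 * when EMIT_INVALIDATE is set.
 */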
int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW + 1;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;
	}

	*cs++ = cmd;
	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */
	intel_ring_advance(rq, cs);

	return 0;
}

int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	if (mode & EMIT_FLUSH) {
		u32 *cs;
		u32 flags = 0;

		flags |= PIPE_CONTROL_CS_STALL;

		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	if (mode & EMIT_INVALIDATE) {
		u32 *cs;
		u32 flags = 0;

		flags |= PIPE_CONTROL_CS_STALL;

		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	return 0;
}

/*
 * On Gen12+ MI_ARB_CHECK doubles as the pre-parser toggle: bit 0 carries the
 * requested disable state and bit 8 flags that the pre-parser field is being
 * updated.
 */
static u32 preparser_disable(bool state)
{
	return MI_ARB_CHECK | 1 << 8 | state;
}

static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
{
	switch (engine->id) {
	case RCS0:
		return GEN12_CCS_AUX_INV;
	case BCS0:
		return GEN12_BCS0_AUX_INV;
	case VCS0:
		return GEN12_VD0_AUX_INV;
	case VCS2:
		return GEN12_VD2_AUX_INV;
	case VECS0:
		return GEN12_VE0_AUX_INV;
	case CCS0:
		return GEN12_CCS0_AUX_INV;
	default:
		return INVALID_MMIO_REG;
	}
}

static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
{
	i915_reg_t reg = gen12_get_aux_inv_reg(engine);

	if (IS_PONTEVECCHIO(engine->i915))
		return false;

	/*
	 * So far platforms supported by i915 having flat ccs do not require
	 * AUX invalidation. Check also whether the engine requires it.
	 */
	return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
}

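/*
 * Emit the AUX table invalidation sequence: an LRI that sets AUX_INV in the
 * engine's invalidation register, followed by a register-poll semaphore that
 * waits for the hardware to clear the bit again, i.e. for the invalidation
 * to complete.
 */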
u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
{
	i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
	u32 gsi_offset = engine->gt->uncore->gsi_offset;

	if (!gen12_needs_ccs_aux_inv(engine))
		return cs;

	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
	*cs++ = AUX_INV;

	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
		MI_SEMAPHORE_REGISTER_POLL |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}

static int mtl_dummy_pipe_control(struct i915_request *rq)
{
	/* Wa_14016712196 */
	if (IS_MTL_GRAPHICS_STEP(rq->i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(rq->i915, P, STEP_A0, STEP_B0)) {
		u32 *cs;

		/* dummy PIPE_CONTROL + depth flush */
		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);
		cs = gen12_emit_pipe_control(cs,
					     0,
					     PIPE_CONTROL_DEPTH_CACHE_FLUSH,
					     LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	return 0;
}

int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	struct intel_engine_cs *engine = rq->engine;

	/*
	 * On Aux CCS platforms the invalidation of the Aux
	 * table requires quiescing memory traffic beforehand
	 */
	if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
		u32 bit_group_0 = 0;
		u32 bit_group_1 = 0;
		int err;
		u32 *cs;

		err = mtl_dummy_pipe_control(rq);
		if (err)
			return err;

		bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH;

		/*
		 * When required, on MTL and beyond we need to set the
		 * CCS_FLUSH bit in the pipe control.
		 */
		if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
			bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;

		/*
		 * L3 fabric flush is needed for AUX CCS invalidation
		 * which happens as part of pipe-control so we can
		 * ignore PIPE_CONTROL_FLUSH_L3. Also PIPE_CONTROL_FLUSH_L3
		 * deals with Protected Memory which is not needed for
		 * AUX CCS invalidation and leads to unwanted side effects.
		 */
		if (mode & EMIT_FLUSH)
			bit_group_1 |= PIPE_CONTROL_FLUSH_L3;

		bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
		bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/* Wa_1409600907:tgl,adl-p */
		bit_group_1 |= PIPE_CONTROL_DEPTH_STALL;
		bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE;

		bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX;
		bit_group_1 |= PIPE_CONTROL_QW_WRITE;

		bit_group_1 |= PIPE_CONTROL_CS_STALL;

		if (!HAS_3D_PIPELINE(engine->i915))
			bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
		else if (engine->class == COMPUTE_CLASS)
			bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1,
					     LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	if (mode & EMIT_INVALIDATE) {
		u32 flags = 0;
		u32 *cs, count;
		int err;

		err = mtl_dummy_pipe_control(rq);
		if (err)
			return err;

		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
		flags |= PIPE_CONTROL_QW_WRITE;

		flags |= PIPE_CONTROL_CS_STALL;

		if (!HAS_3D_PIPELINE(engine->i915))
			flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
		else if (engine->class == COMPUTE_CLASS)
			flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

		count = 8;
		if (gen12_needs_ccs_aux_inv(rq->engine))
			count += 8;

		cs = intel_ring_begin(rq, count);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/*
		 * Prevent the pre-parser from skipping past the TLB
		 * invalidate and loading a stale page for the batch
		 * buffer / request payload.
		 */
		*cs++ = preparser_disable(true);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);

		cs = gen12_emit_aux_table_inv(engine, cs);

		*cs++ = preparser_disable(false);
		intel_ring_advance(rq, cs);
	}

	return 0;
}

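/*
 * Non-render flush for Gen12+: the MI_FLUSH_DW is bracketed by a pre-parser
 * disable/enable pair when invalidating, and is followed by the AUX table
 * invalidation sequence on engines that need it.
 */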
int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
	u32 cmd = 4;
	u32 *cs;

	if (mode & EMIT_INVALIDATE) {
		cmd += 2;

		if (gen12_needs_ccs_aux_inv(rq->engine))
			cmd += 8;
	}

	cs = intel_ring_begin(rq, cmd);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (mode & EMIT_INVALIDATE)
		*cs++ = preparser_disable(true);

	cmd = MI_FLUSH_DW + 1;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;

		if (gen12_needs_ccs_aux_inv(rq->engine) &&
		    rq->engine->class == COPY_ENGINE_CLASS)
			cmd |= MI_FLUSH_DW_CCS;
	}

	*cs++ = cmd;
	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */

	cs = gen12_emit_aux_table_inv(rq->engine, cs);

	if (mode & EMIT_INVALIDATE)
		*cs++ = preparser_disable(false);

	intel_ring_advance(rq, cs);

	return 0;
}

static u32 preempt_address(struct intel_engine_cs *engine)
{
	return (i915_ggtt_offset(engine->status_page.vma) +
		I915_GEM_HWS_PREEMPT_ADDR);
}

static u32 hwsp_offset(const struct i915_request *rq)
{
	const struct intel_timeline *tl;

	/* Before the request is executed, the timeline is fixed */
	tl = rcu_dereference_protected(rq->timeline,
				       !i915_request_signaled(rq));

	/* See the comment in i915_request_active_seqno(). */
	return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno);
}

int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
	u32 *cs;

	GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
	if (!i915_request_timeline(rq)->has_initial_breadcrumb)
		return 0;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = hwsp_offset(rq);
	*cs++ = 0;
	*cs++ = rq->fence.seqno - 1;

	/*
	 * Check if we have been preempted before we even get started.
	 *
	 * After this point i915_request_started() reports true, even if
	 * we get preempted and so are no longer running.
	 *
	 * i915_request_started() is used during preemption processing
	 * to decide if the request is currently inside the user payload
	 * or spinning on a kernel semaphore (or earlier). For no-preemption
	 * requests, we do allow preemption on the semaphore before the user
	 * payload, but do not allow preemption once the request is started.
	 *
	 * i915_request_started() is similarly used during GPU hangs to
	 * determine if the user's payload was guilty, and if so, the
	 * request is banned. Before the request is started, it is assumed
	 * to be unharmed and an innocent victim of another's hang.
	 */
	*cs++ = MI_NOOP;
	*cs++ = MI_ARB_CHECK;

	intel_ring_advance(rq, cs);

	/* Record the updated position of the request's payload */
	rq->infix = intel_ring_offset(rq, cs);

	__set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);

	return 0;
}

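/*
 * Xe_HP batch-buffer start: RING_PREDICATE_RESULT is reloaded from the
 * context's indirect wa_bb page before entering the user batch, and after
 * the batch returns we jump through the wa_bb again to neutralise any stray
 * MI_SET_PREDICATE the batch may have left enabled (see the fixup below).
 */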
static int __xehp_emit_bb_start(struct i915_request *rq,
				u64 offset, u32 len,
				const unsigned int flags,
				u32 arb)
{
	struct intel_context *ce = rq->context;
	u32 wa_offset = lrc_indirect_bb(ce);
	u32 *cs;

	GEM_BUG_ON(!ce->wa_bb_page);

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | arb;

	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(RING_PREDICATE_RESULT(0));
	*cs++ = wa_offset + DG2_PREDICATE_RESULT_WA;
	*cs++ = 0;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	/* Fixup stray MI_SET_PREDICATE as it prevents us executing the ring */
	*cs++ = MI_BATCH_BUFFER_START_GEN8;
	*cs++ = wa_offset + DG2_PREDICATE_RESULT_BB;
	*cs++ = 0;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	intel_ring_advance(rq, cs);

	return 0;
}

int xehp_emit_bb_start_noarb(struct i915_request *rq,
			     u64 offset, u32 len,
			     const unsigned int flags)
{
	return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_DISABLE);
}

int xehp_emit_bb_start(struct i915_request *rq,
		       u64 offset, u32 len,
		       const unsigned int flags)
{
	return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_ENABLE);
}

int gen8_emit_bb_start_noarb(struct i915_request *rq,
			     u64 offset, u32 len,
			     const unsigned int flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * WaDisableCtxRestoreArbitration:bdw,chv
	 *
	 * We don't need to perform MI_ARB_ENABLE as often as we do (in
	 * particular all the gen that do not need the w/a at all!), if we
	 * took care to make sure that on every switch into this context
	 * (both ordinary and for preemption) that arbitration was enabled
	 * we would be fine. However, for gen8 there is another w/a that
	 * requires us to not preempt inside GPGPU execution, so we keep
	 * arbitration disabled for gen8 batches. Arbitration will be
	 * re-enabled before we close the request
	 * (engine->emit_fini_breadcrumb).
	 */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* FIXME(BDW+): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	intel_ring_advance(rq, cs);

	return 0;
}

int gen8_emit_bb_start(struct i915_request *rq,
		       u64 offset, u32 len,
		       const unsigned int flags)
{
	u32 *cs;

	if (unlikely(i915_request_has_nopreempt(rq)))
		return gen8_emit_bb_start_noarb(rq, offset, len, flags);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static void assert_request_valid(struct i915_request *rq)
{
	struct intel_ring *ring __maybe_unused = rq->ring;

	/* Can we unwind this request without appearing to go forwards? */
	GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
}

/*
 * Reserve space for 2 NOOPs at the end of each request to be
 * used as a workaround for not being allowed to do lite
 * restore with HEAD==TAIL (WaIdleLiteRestore).
 */
static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
{
	/* Ensure there's always at least one preemption point per-request. */
	*cs++ = MI_ARB_CHECK;
	*cs++ = MI_NOOP;
	rq->wa_tail = intel_ring_offset(rq, cs);

	/* Check that entire request is less than half the ring */
	assert_request_valid(rq);

	return cs;
}

static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = preempt_address(rq->engine);
	*cs++ = 0;
	*cs++ = MI_NOOP;

	return cs;
}

static __always_inline u32*
gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(rq->engine) &&
	    !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
		cs = emit_preempt_busywait(rq, cs);

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return gen8_emit_wa_tail(rq, cs);
}

static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
}

u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
}

u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_CS_STALL |
				    PIPE_CONTROL_TLB_INVALIDATE |
				    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				    PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				    PIPE_CONTROL_DC_FLUSH_ENABLE,
				    0);

	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
	cs = gen8_emit_ggtt_write_rcs(cs,
				      rq->fence.seqno,
				      hwsp_offset(rq),
				      PIPE_CONTROL_FLUSH_ENABLE |
				      PIPE_CONTROL_CS_STALL);

	return gen8_emit_fini_breadcrumb_tail(rq, cs);
}

u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_CS_STALL |
				    PIPE_CONTROL_TLB_INVALIDATE |
				    PIPE_CONTROL_TILE_CACHE_FLUSH |
				    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				    PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				    PIPE_CONTROL_DC_FLUSH_ENABLE,
				    0);

	/* XXX: Look at gen8_emit_fini_breadcrumb_rcs */
	cs = gen8_emit_ggtt_write_rcs(cs,
				      rq->fence.seqno,
				      hwsp_offset(rq),
				      PIPE_CONTROL_FLUSH_ENABLE |
				      PIPE_CONTROL_CS_STALL);

	return gen8_emit_fini_breadcrumb_tail(rq, cs);
}

/*
 * Note that the CS instruction pre-parser will not stall on the breadcrumb
 * flush and will continue pre-fetching the instructions after it before the
 * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
 * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
 * of the next request before the memory has been flushed, we're guaranteed that
 * we won't access the batch itself too early.
 * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
 * so, if the current request is modifying an instruction in the next request on
 * the same intel_context, we might pre-fetch and then execute the pre-update
 * instruction. To avoid this, the users of self-modifying code should either
 * disable the parser around the code emitting the memory writes, via a new flag
 * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
 * the in-kernel use-cases we've opted to use a separate context, see
 * reloc_gpu() as an example.
 * All the above applies only to the instructions themselves. Non-inline data
 * used by the instructions is not pre-fetched.
 */

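/*
 * Gen12 variant of the preempt busy-wait: the same semaphore poll on the
 * per-engine preempt address, but using the token form of MI_SEMAPHORE_WAIT,
 * which is one dword longer (so the padding MI_NOOP is dropped).
 */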
static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = preempt_address(rq->engine);
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}

/* Wa_14014475959:dg2 */
#define CCS_SEMAPHORE_PPHWSP_OFFSET	0x540
static u32 ccs_semaphore_offset(struct i915_request *rq)
{
	return i915_ggtt_offset(rq->context->state) +
		(LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
}

/* Wa_14014475959:dg2 */
static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
{
	int i;

	*cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
		MI_ATOMIC_MOVE;
	*cs++ = ccs_semaphore_offset(rq);
	*cs++ = 0;
	*cs++ = 1;

	/*
	 * When MI_ATOMIC_INLINE_DATA set this command must be 11 DW + (1 NOP)
	 * to align. 4 DWs above + 8 filler DWs here.
	 */
	for (i = 0; i < 8; ++i)
		*cs++ = 0;

	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = ccs_semaphore_offset(rq);
	*cs++ = 0;

	return cs;
}

static __always_inline u32*
gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(rq->engine) &&
	    !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
		cs = gen12_emit_preempt_busywait(rq, cs);

	/* Wa_14014475959:dg2 */
	if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
		cs = ccs_emit_wa_busywait(rq, cs);

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return gen8_emit_wa_tail(rq, cs);
}

u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
	/* XXX Stalling flush before seqno write; post-sync not */
	cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
	return gen12_emit_fini_breadcrumb_tail(rq, cs);
}

u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	struct drm_i915_private *i915 = rq->i915;
	u32 flags = (PIPE_CONTROL_CS_STALL |
		     PIPE_CONTROL_TLB_INVALIDATE |
		     PIPE_CONTROL_TILE_CACHE_FLUSH |
		     PIPE_CONTROL_FLUSH_L3 |
		     PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		     PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		     PIPE_CONTROL_DC_FLUSH_ENABLE |
		     PIPE_CONTROL_FLUSH_ENABLE);

	/* Wa_14016712196 */
	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
		/* dummy PIPE_CONTROL + depth flush */
		cs = gen12_emit_pipe_control(cs, 0,
					     PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);

	if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		/* Wa_1409600907 */
		flags |= PIPE_CONTROL_DEPTH_STALL;

	if (!HAS_3D_PIPELINE(rq->i915))
		flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
	else if (rq->engine->class == COMPUTE_CLASS)
		flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

	cs = gen12_emit_pipe_control(cs, PIPE_CONTROL0_HDC_PIPELINE_FLUSH, flags,
				     0);

	/* XXX: Look at gen8_emit_fini_breadcrumb_rcs */
	cs = gen12_emit_ggtt_write_rcs(cs,
				       rq->fence.seqno,
				       hwsp_offset(rq),
				       0,
				       PIPE_CONTROL_FLUSH_ENABLE |
				       PIPE_CONTROL_CS_STALL);

	return gen12_emit_fini_breadcrumb_tail(rq, cs);
}