/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>

#include <drm/i915_drm.h>

#include "gem/i915_gem_context.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "intel_workarounds.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
	unsigned int num_store_dw;
	u32 cmd, *cs;

	cmd = MI_FLUSH;
	num_store_dw = 0;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;
	if (mode & EMIT_FLUSH)
		num_store_dw = 4;

	cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	while (num_store_dw--) {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
						INTEL_GT_SCRATCH_FIELD_DEFAULT);
		*cs++ = 0;
	}
	*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;

	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;
	int i;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
			cmd |= MI_INVALIDATE_ISP;
	}

	i = 2;
	if (mode & EMIT_INVALIDATE)
		i += 20;

	cs = intel_ring_begin(rq, i);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;

	/*
	 * A random delay to let the CS invalidate take effect? Without this
	 * delay, the GPU relocation path fails as the CS does not see
	 * the updated contents. Just as important, if we apply the flushes
	 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
	 * write and before the invalidate on the next batch), the relocations
	 * still fail. This implies that there is a delay following invalidation
	 * that is required to reset the caches, as opposed to a delay to
	 * ensure the memory is written.
	 */
	if (mode & EMIT_INVALIDATE) {
		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
						INTEL_GT_SCRATCH_FIELD_DEFAULT) |
			PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;

		for (i = 0; i < 12; i++)
			*cs++ = MI_FLUSH;

		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
						INTEL_GT_SCRATCH_FIELD_DEFAULT) |
			PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;
	}

	*cs++ = cmd;

	intel_ring_advance(rq, cs);

	return 0;
}

/*
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	u32 *cs, flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = gen6_emit_post_sync_nonzero_flush(rq);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
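		 * (Hence the QW_WRITE below, which posts a qword to the
		 * render-flush scratch slot.)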
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	/* First we do the gen6_emit_post_sync_nonzero_flush w/a */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT) |
		PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;

	/* Finally we can flush and with it emit the breadcrumb */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_CS_STALL);
	*cs++ = i915_request_active_timeline(rq)->hwsp_offset |
		PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

static int
gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	u32 *cs, flags = 0;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
		 */
		gen7_render_ring_cs_stall_wa(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_FLUSH_ENABLE |
		 PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_GLOBAL_GTT_IVB |
		 PIPE_CONTROL_CS_STALL);
	*cs++ = i915_request_active_timeline(rq)->hwsp_offset;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

#define GEN7_XCS_WA 32
static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	int i;

	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH_DW;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;

	for (i = 0; i < GEN7_XCS_WA; i++) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
		*cs++ = rq->fence.seqno;
	}

	*cs++ = MI_FLUSH_DW;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#undef GEN7_XCS_WA

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
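	 * (HWSTAM masks which interrupt events are reported via the
	 * hardware status page.)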
	 */
	if (engine->class == RENDER_CLASS) {
		if (INTEL_GEN(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = lower_32_bits(phys);
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (phys >> 28) & 0xf0;

	I915_WRITE(HWS_PGA, addr);
}

static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}

static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN(dev_priv, 7)) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			/* fallthrough */
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN(dev_priv, 6)) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(hwsp, offset);
	POSTING_READ(hwsp);
}

static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	ENGINE_WRITE(engine, RING_INSTPM,
		     _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					INSTPM_SYNC_FLUSH));
	if (intel_wait_for_register(engine->uncore,
				    RING_INSTPM(engine->mmio_base),
				    INSTPM_SYNC_FLUSH, 0,
				    1000))
		DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
			  engine->name);
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(engine->uncore,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);

			/*
			 * Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
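			 * (HEAD == TAIL means the ring has drained even if
			 * MODE_IDLE never latched.)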
			 */
			if (ENGINE_READ(engine, RING_HEAD) !=
			    ENGINE_READ(engine, RING_TAIL))
				return false;
		}
	}

	ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));

	ENGINE_WRITE(engine, RING_HEAD, 0);
	ENGINE_WRITE(engine, RING_TAIL, 0);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE(engine, RING_CTL, 0);

	return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}

static int xcs_resume(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->legacy.ring;
	int ret = 0;

	GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
		  engine->name, ring->head, ring->tail);

	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_DRIVER("%s head not reset to zero "
				 "ctl %08x head %08x tail %08x start %08x\n",
				 engine->name,
				 ENGINE_READ(engine, RING_CTL),
				 ENGINE_READ(engine, RING_HEAD),
				 ENGINE_READ(engine, RING_TAIL),
				 ENGINE_READ(engine, RING_START));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  ENGINE_READ(engine, RING_CTL),
				  ENGINE_READ(engine, RING_HEAD),
				  ENGINE_READ(engine, RING_TAIL),
				  ENGINE_READ(engine, RING_START));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE(engine, RING_HEAD, ring->head);
	ENGINE_WRITE(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(engine->uncore,
				    RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  ENGINE_READ(engine, RING_CTL),
			  ENGINE_READ(engine, RING_CTL) & RING_VALID,
			  ENGINE_READ(engine, RING_HEAD), ring->head,
			  ENGINE_READ(engine, RING_TAIL), ring->tail,
			  ENGINE_READ(engine, RING_START),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	if (INTEL_GEN(dev_priv) > 2)
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_queue_breadcrumbs(engine);
out:
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	return ret;
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;

	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
	 * from system hang if batchbuffer is progressing when
	 * the reset is issued, regardless of READY_TO_RESET ack.
	 * Thus assume it is best to stop engines on all gens
	 * where we have a gpu reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	GEM_TRACE("%s\n", engine->name);

	if (intel_engine_stop_cs(engine))
		GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);

	intel_uncore_write_fw(uncore,
			      RING_HEAD(base),
			      intel_uncore_read_fw(uncore, RING_TAIL(base)));
	intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */

	intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
	intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
	intel_uncore_posting_read_fw(uncore, RING_TAIL(base));

	/* The ring must be empty before it is disabled */
	intel_uncore_write_fw(uncore, RING_CTL(base), 0);

	/* Check acts as a post */
	if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
		GEM_TRACE("%s: ring head [%x] not parked\n",
			  engine->name,
			  intel_uncore_read_fw(uncore, RING_HEAD(base)));
}

static void reset_ring(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->active.lock, flags);
	list_for_each_entry(pos, &engine->active.requests, sched.link) {
		if (!i915_request_completed(pos)) {
			rq = pos;
			break;
		}
	}

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non-default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away random
	 * amounts of batches it has prepared for execution.
	 * Sophisticated clients can use gem_reset_stats_ioctl and dma fence
	 * status (exported via sync_file info ioctl on explicit fences) to
	 * observe when they lose the context state and should rebuild
	 * accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

static int rcs_resume(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/*
	 * Disable CONSTANT_BUFFER before it is loaded from the context
	 * image. Once it is loaded, it is executed and the stored
	 * address may no longer be valid, leading to a GPU hang.
	 *
	 * This imposes the requirement that userspace reload their
	 * CONSTANT_BUFFER on every batch, fortunately a requirement
	 * they are already accustomed to from before contexts were
	 * enabled.
	 */
	if (IS_GEN(dev_priv, 4))
		I915_WRITE(ECOSKPD,
			   _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (IS_GEN_RANGE(dev_priv, 4, 6))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (IS_GEN_RANGE(dev_priv, 6, 7))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (IS_GEN(dev_priv, 6))
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN(dev_priv, 7))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN(dev_priv, 6)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 * policy. [...] This bit must be reset. LRA replacement
		 * policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (IS_GEN_RANGE(dev_priv, 6, 7))
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return xcs_resume(engine);
}

static void cancel_requests(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->active.requests, sched.link) {
		if (!i915_request_signaled(request))
			dma_fence_set_error(&request->fence, -EIO);

		i915_request_mark_complete(request);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH;

	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

#define GEN5_WA_STORES 8 /* must be at least 1! */
static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	int i;

	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH;

	BUILD_BUG_ON(GEN5_WA_STORES < 1);
	for (i = 0; i < GEN5_WA_STORES; i++) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
		*cs++ = rq->fence.seqno;
	}

	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#undef GEN5_WA_STORES

static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}

static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	engine->i915->irq_mask &= ~engine->irq_enable_mask;
	intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
	intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	engine->i915->irq_mask |= engine->irq_enable_mask;
	intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
}

static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	i915->irq_mask &= ~engine->irq_enable_mask;
	intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
	ENGINE_POSTING_READ16(engine, RING_IMR);
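
	/* (Gen2 keeps only a 16-bit IMR, hence the 16-bit accessors above.) */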
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	i915->irq_mask |= engine->irq_enable_mask;
	intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
}

static int
bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);
	return 0;
}

static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR,
		     ~(engine->irq_enable_mask | engine->irq_keep_mask));

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	ENGINE_POSTING_READ(engine, RING_IMR);

	gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
	gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	ENGINE_POSTING_READ(engine, RING_IMR);

	gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR, ~0);
	gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
}

static int
i965_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs, cs_offset =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Evict the invalid PTE TLBs */
	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
	*cs++ = cs_offset;
	*cs++ = 0xdeadbeef;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		cs = intel_ring_begin(rq, 6 + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Blit the batch (which now has all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
		*cs++ = cs_offset;
		*cs++ = 4096;
		*cs++ = offset;

		*cs++ = MI_FLUSH;
		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);

		/* ... and execute it. */
		offset = cs_offset;
	}

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
			  MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}

static int
i915_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
			  MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static struct i915_address_space *vm_alias(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = ce->vm;
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static int __context_pin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;
	int err = 0;

	vm = vm_alias(ce);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	if (engine->default_state) {
		void *defaults, *vaddr;

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_obj;
		}

		defaults = i915_gem_object_pin_map(engine->default_state,
						   I915_MAP_WB);
		if (IS_ERR(defaults)) {
			err = PTR_ERR(defaults);
			goto err_map;
		}

		memcpy(vaddr, defaults, engine->context_size);
		i915_gem_object_unpin_map(engine->default_state);

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);
	}

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
	}

	return 0;
}

static int ring_context_pin(struct intel_context *ce)
{
	int err;

	err = intel_context_active_acquire(ce);
	if (err)
		return err;

	err = __context_pin_ppgtt(ce);
	if (err)
		goto err_active;

	return 0;

err_active:
	intel_context_active_release(ce);
	return err;
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, 0);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.pin = ring_context_pin,
	.unpin = ring_context_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};

static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = PP_DIR_DCLV_2G;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;

	intel_ring_advance(rq, cs);

	return 0;
}

static int flush_pd_dir(struct i915_request *rq)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Stall until the page table load is complete */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);
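
	/*
	 * (The dword stored to scratch above is never read back; the MI_SRM
	 * read of PP_DIR_BASE is what stalls the CS until the page-directory
	 * load has completed.)
	 */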
	return 0;
}

static inline int mi_set_context(struct i915_request *rq, u32 flags)
{
	struct drm_i915_private *i915 = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	flags |= MI_MM_SPACE_GTT;
	if (IS_HASWELL(i915))
		/* These flags are for resource streamer on HSW+ */
		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
	else
		/* We need to save the extended state for powersaving modes */
		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

	len = 4;
	if (IS_GEN(i915, 7))
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (IS_GEN(i915, 5))
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (IS_GEN(i915, 7)) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (IS_GEN(i915, 5)) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (IS_GEN(i915, 7)) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/*
			 * Insert a delay before the next switch!
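			 * (The MI_SRM read-back of the last PSMI register
			 * below appears to provide that delay.)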
			 */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (IS_GEN(i915, 5)) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3_slice(struct i915_request *rq, int slice)
{
	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = rq->gem_context;
	int i, err;

	if (!ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_context(struct i915_request *rq)
{
	struct intel_context *ce = rq->hw_context;
	struct i915_address_space *vm = vm_alias(ce);
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));

	if (vm) {
		ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm));
		if (ret)
			return ret;
	}

	if (ce->state) {
		u32 hw_flags;

		GEM_BUG_ON(rq->engine->id != RCS0);

		/*
		 * The kernel context(s) is treated as pure scratch and is not
		 * expected to retain any state (as we sacrifice it during
		 * suspend and on resume it may be corrupted). This is ok,
		 * as nothing actually executes using the kernel context; it
		 * is purely used for flushing user contexts.
		 */
		hw_flags = 0;
		if (i915_gem_context_is_kernel(rq->gem_context))
			hw_flags = MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, hw_flags);
		if (ret)
			return ret;
	}

	if (vm) {
		struct intel_engine_cs *engine = rq->engine;

		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (ret)
			return ret;

		ret = flush_pd_dir(rq);
		if (ret)
			return ret;

		/*
		 * Not only do we need a full barrier (post-sync write) after
		 * invalidating the TLBs, but we need to wait a little bit
		 * longer. Whether this is merely delaying us, or the
		 * subsequent flush is a key part of serialising with the
		 * post-sync op, this extra pass appears vital before a
		 * mm switch!
		 */
		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (ret)
			return ret;

		ret = engine->emit_flush(rq, EMIT_FLUSH);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	return 0;
}

static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

static int mi_flush_dw(struct i915_request *rq, u32 flags)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	cmd |= flags;

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
{
	return mi_flush_dw(rq, mode & EMIT_INVALIDATE ?
			       invflags : 0);
}

static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
}

static int
hsw_emit_bb_start(struct i915_request *rq,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen6_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
	engine->cancel_requests = cancel_requests;

	engine->park = NULL;
	engine->unpark = NULL;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	i9xx_set_default_submission(engine);
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_destroy(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WARN_ON(INTEL_GEN(dev_priv) > 2 &&
		(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);

	kfree(engine);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (INTEL_GEN(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (INTEL_GEN(i915) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(INTEL_GEN(i915) >= 8);

	setup_irq(engine);

	engine->destroy = ring_destroy;

	engine->resume = xcs_resume;
	engine->reset.prepare = reset_prepare;
	engine->reset.reset = reset_ring;
	engine->reset.finish = reset_finish;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous
	 * final breadcrumb is equivalent to our next initial breadcrumb,
	 * so we can elide engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
	if (IS_GEN(i915, 5))
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(i915) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}

static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(i915) >= 7) {
		engine->emit_flush = gen7_render_ring_flush;
		engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
	} else if (IS_GEN(i915, 6)) {
		engine->emit_flush = gen6_render_ring_flush;
		engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
	} else if (IS_GEN(i915, 5)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(i915) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->resume = rcs_resume;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN(i915, 6))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (IS_GEN(i915, 6))
			engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
		else
			engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
	} else {
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN(i915, 5))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}

static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (IS_GEN(i915, 6))
		engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
	else
		engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(INTEL_GEN(i915) < 7);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_vebox_irq_enable;
	engine->irq_disable = hsw_vebox_irq_disable;

	engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}

int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	return 0;
}

int intel_ring_submission_init(struct intel_engine_cs *engine)
{
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	int err;

	timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	err = intel_timeline_pin(timeline);
	if (err)
		goto err_timeline;

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline_unpin;
	}

	err = intel_ring_pin(ring);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	err = intel_engine_init_common(engine);
	if (err)
		goto err_ring_unpin;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	return 0;

err_ring_unpin:
	intel_ring_unpin(ring);
err_ring:
	intel_ring_put(ring);
err_timeline_unpin:
	intel_timeline_unpin(timeline);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}