/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>

#include "gem/i915_gem_context.h"

#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "intel_workarounds.h"
#include "shmem_utils.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
	unsigned int num_store_dw;
	u32 cmd, *cs;

	cmd = MI_FLUSH;
	num_store_dw = 0;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;
	if (mode & EMIT_FLUSH)
		num_store_dw = 4;

	cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	while (num_store_dw--) {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
						INTEL_GT_SCRATCH_FIELD_DEFAULT);
		*cs++ = 0;
	}
	*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;

	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;
	int i;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
			cmd |= MI_INVALIDATE_ISP;
	}

	i = 2;
	if (mode & EMIT_INVALIDATE)
		i += 20;

	cs = intel_ring_begin(rq, i);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;

	/*
	 * A random delay to let the CS invalidate take effect? Without this
	 * delay, the GPU relocation path fails as the CS does not see
	 * the updated contents. Just as important, if we apply the flushes
	 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
	 * write and before the invalidate on the next batch), the relocations
	 * still fail. This implies that it is a delay following invalidation
	 * that is required to reset the caches as opposed to a delay to
	 * ensure the memory is written.
	 */
	if (mode & EMIT_INVALIDATE) {
		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
						INTEL_GT_SCRATCH_FIELD_DEFAULT) |
			PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;

		for (i = 0; i < 12; i++)
			*cs++ = MI_FLUSH;

		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
						INTEL_GT_SCRATCH_FIELD_DEFAULT) |
			PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;
	}

	*cs++ = cmd;

	intel_ring_advance(rq, cs);

	return 0;
}

/*
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	u32 *cs, flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = gen6_emit_post_sync_nonzero_flush(rq);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
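		 * (hence the QW_WRITE and CS_STALL added below)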
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	/* First we do the gen6_emit_post_sync_nonzero_flush w/a */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT) |
		PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;

	/* Finally we can flush and with it emit the breadcrumb */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_CS_STALL);
	*cs++ = i915_request_active_timeline(rq)->hwsp_offset |
		PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

static int
gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	u32 *cs, flags = 0;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/*
	 * CS_STALL suggests at least a post-sync write.
	 */
	flags |= PIPE_CONTROL_QW_WRITE;
	flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
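		 * (emitted by gen7_render_ring_cs_stall_wa() just below)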
		 */
		gen7_render_ring_cs_stall_wa(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_FLUSH_ENABLE |
		 PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_GLOBAL_GTT_IVB |
		 PIPE_CONTROL_CS_STALL);
	*cs++ = i915_request_active_timeline(rq)->hwsp_offset;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

#define GEN7_XCS_WA 32
static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	int i;

	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
		MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;

	for (i = 0; i < GEN7_XCS_WA; i++) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
		*cs++ = rq->fence.seqno;
	}

	*cs++ = MI_FLUSH_DW;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#undef GEN7_XCS_WA

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
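	 * The bit kept unmasked is BIT(0) on gen6+ and I915_USER_INTERRUPT
	 * on earlier parts.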
	 */
	if (engine->class == RENDER_CLASS) {
		if (INTEL_GEN(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (INTEL_GEN(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}

static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}

static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN(engine->i915, 7)) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			/* fallthrough */
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN(engine->i915, 6)) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write(engine->uncore, hwsp, offset);
	intel_uncore_posting_read(engine->uncore, hwsp);
}

static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	drm_WARN_ON(&dev_priv->drm,
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	ENGINE_WRITE(engine, RING_INSTPM,
		     _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					INSTPM_SYNC_FLUSH));
	if (intel_wait_for_register(engine->uncore,
				    RING_INSTPM(engine->mmio_base),
				    INSTPM_SYNC_FLUSH, 0,
				    1000))
		drm_err(&dev_priv->drm,
			"%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
			engine->name);
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(engine->uncore,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			drm_err(&dev_priv->drm,
				"%s : timed out trying to stop ring\n",
				engine->name);

			/*
			 * Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
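			 * An empty ring has HEAD == TAIL, so compare the
			 * two registers directly.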
			 */
			if (ENGINE_READ(engine, RING_HEAD) !=
			    ENGINE_READ(engine, RING_TAIL))
				return false;
		}
	}

	ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));

	ENGINE_WRITE(engine, RING_HEAD, 0);
	ENGINE_WRITE(engine, RING_TAIL, 0);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE(engine, RING_CTL, 0);

	return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}

static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (vm) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
		ENGINE_WRITE(engine, RING_PP_DIR_BASE,
			     px_base(ppgtt->pd)->ggtt_offset << 10);
	}
}

static int xcs_resume(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->legacy.ring;
	int ret = 0;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		drm_dbg(&dev_priv->drm, "%s head not reset to zero "
			"ctl %08x head %08x tail %08x start %08x\n",
			engine->name,
			ENGINE_READ(engine, RING_CTL),
			ENGINE_READ(engine, RING_HEAD),
			ENGINE_READ(engine, RING_TAIL),
			ENGINE_READ(engine, RING_START));

		if (!stop_ring(engine)) {
			drm_err(&dev_priv->drm,
				"failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ(engine, RING_CTL),
				ENGINE_READ(engine, RING_HEAD),
				ENGINE_READ(engine, RING_TAIL),
				ENGINE_READ(engine, RING_START));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE(engine, RING_HEAD, ring->head);
	ENGINE_WRITE(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(engine->uncore,
				    RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		drm_err(&dev_priv->drm, "%s initialization failed "
			"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			engine->name,
			ENGINE_READ(engine, RING_CTL),
			ENGINE_READ(engine, RING_CTL) & RING_VALID,
			ENGINE_READ(engine, RING_HEAD), ring->head,
			ENGINE_READ(engine, RING_TAIL), ring->tail,
			ENGINE_READ(engine, RING_START),
			i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	if (INTEL_GEN(dev_priv) > 2)
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
out:
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	return ret;
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;

	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also, a modern gpu such as kbl can suffer
	 * from a system hang if a batchbuffer is progressing when
	 * the reset is issued, regardless of the READY_TO_RESET ack.
	 * Thus assume it is best to stop engines on all gens
	 * where we have a gpu reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");

	if (intel_engine_stop_cs(engine))
		ENGINE_TRACE(engine, "timed out on STOP_RING\n");

	intel_uncore_write_fw(uncore,
			      RING_HEAD(base),
			      intel_uncore_read_fw(uncore, RING_TAIL(base)));
	intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */

	intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
	intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
	intel_uncore_posting_read_fw(uncore, RING_TAIL(base));

	/* The ring must be empty before it is disabled */
	intel_uncore_write_fw(uncore, RING_CTL(base), 0);

	/* Check acts as a post */
	if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
		ENGINE_TRACE(engine, "ring head [%x] not parked\n",
			     intel_uncore_read_fw(uncore, RING_HEAD(base)));
}

static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->active.lock, flags);
	list_for_each_entry(pos, &engine->active.requests, sched.link) {
		if (!i915_request_completed(pos)) {
			rq = pos;
			break;
		}
	}

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when it loses the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

static int rcs_resume(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;

	/*
	 * Disable CONSTANT_BUFFER before it is loaded from the context
	 * image. For as soon as it is loaded, it is executed and the stored
	 * address may no longer be valid, leading to a GPU hang.
	 *
	 * This imposes the requirement that userspace reload their
	 * CONSTANT_BUFFER on every batch, fortunately a requirement
	 * they are already accustomed to from before contexts were
	 * enabled.
	 */
	if (IS_GEN(i915, 4))
		intel_uncore_write(uncore, ECOSKPD,
			   _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));

	if (IS_GEN_RANGE(i915, 6, 7))
		intel_uncore_write(uncore, INSTPM,
				   _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return xcs_resume(engine);
}

static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->active.requests, sched.link) {
		i915_request_set_error_once(request, -EIO);
		i915_request_mark_complete(request);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH;

	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

#define GEN5_WA_STORES 8 /* must be at least 1! */
static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	int i;

	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH;

	BUILD_BUG_ON(GEN5_WA_STORES < 1);
	for (i = 0; i < GEN5_WA_STORES; i++) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
		*cs++ = rq->fence.seqno;
	}

	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#undef GEN5_WA_STORES

static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}

static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	engine->i915->irq_mask &= ~engine->irq_enable_mask;
	intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
	intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	engine->i915->irq_mask |= engine->irq_enable_mask;
	intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
}

static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	i915->irq_mask &= ~engine->irq_enable_mask;
	intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
	ENGINE_POSTING_READ16(engine, RING_IMR);
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	i915->irq_mask |= engine->irq_enable_mask;
	intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
}

static int
bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);
	return 0;
}

static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR,
		     ~(engine->irq_enable_mask | engine->irq_keep_mask));

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	ENGINE_POSTING_READ(engine, RING_IMR);

	gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
	gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	ENGINE_POSTING_READ(engine, RING_IMR);

	gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR, ~0);
	gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
}

static int
i965_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES * 4096, I830_BATCH_LIMIT)
static int
i830_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs, cs_offset =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Evict the invalid PTE TLBs */
	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
	*cs++ = cs_offset;
	*cs++ = 0xdeadbeef;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		cs = intel_ring_begin(rq, 6 + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Blit the batch (which now has all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
		*cs++ = cs_offset;
		*cs++ = 4096;
		*cs++ = offset;

		*cs++ = MI_FLUSH;
		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);

		/* ... and execute it. */
		offset = cs_offset;
	}

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
			  MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}

static int
i915_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
			  MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static int __context_pin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;
	int err = 0;

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	if (engine->default_state) {
		void *vaddr;

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_obj;
		}

		shmem_read(engine->default_state, 0,
			   vaddr, engine->context_size);

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);
	}

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
		if (engine->default_state)
			__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	}

	return 0;
}

static int ring_context_pin(struct intel_context *ce)
{
	return __context_pin_ppgtt(ce);
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.pin = ring_context_pin,
	.unpin = ring_context_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};

static int load_pd_dir(struct i915_request *rq,
		       const struct i915_ppgtt *ppgtt,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}

static inline int mi_set_context(struct i915_request *rq,
				 struct intel_context *ce,
				 u32 flags)
{
	struct drm_i915_private *i915 = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (IS_GEN(i915, 7))
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (IS_GEN(i915, 5))
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (IS_GEN(i915, 7)) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (IS_GEN(i915, 5)) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (IS_GEN(i915, 7)) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
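			/* (SRM the last register written back to scratch) */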
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (IS_GEN(i915, 5)) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3_slice(struct i915_request *rq, int slice)
{
	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE / 4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE / 4);
	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
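	 * (flush above, reload the page directory, then invalidate below)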
	 */
	ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    engine->wa_ctx.vma->node.start, 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}

static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

static int mi_flush_dw(struct i915_request *rq, u32 flags)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	cmd |= flags;

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
{
	return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
}

static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
}

static int
hsw_emit_bb_start(struct i915_request *rq,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen6_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		 0 : MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;

	engine->park = NULL;
	engine->unpark = NULL;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	i9xx_set_default_submission(engine);
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (INTEL_GEN(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (INTEL_GEN(i915) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(INTEL_GEN(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb, so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
	if (IS_GEN(i915, 5))
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(i915) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}

static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(i915) >= 7) {
		engine->emit_flush = gen7_render_ring_flush;
		engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
	} else if (IS_GEN(i915, 6)) {
		engine->emit_flush = gen6_render_ring_flush;
		engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
	} else if (IS_GEN(i915, 5)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(i915) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->resume = rcs_resume;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN(i915, 6))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (IS_GEN(i915, 6))
			engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
		else
			engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
	} else {
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN(i915, 5))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}

static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (IS_GEN(i915, 6))
		engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
	else
		engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(INTEL_GEN(i915) < 7);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_vebox_irq_enable;
	engine->irq_disable = hsw_vebox_irq_disable;

	engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}

static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}

static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size;
	int err;
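
	/*
	 * gen7_ctx_switch_bb_setup() is called twice: first with a NULL vma
	 * to probe the size of the clear batch, then again to emit it into
	 * the pinned buffer.
	 */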
	size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (size <= 0)
		return size;

	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		goto err_private;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err_private:
	intel_context_put(vma->private);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	err = intel_timeline_pin(timeline);
	if (err)
		goto err_timeline;

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline_unpin;
	}

	err = intel_ring_pin(ring);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
		err = gen7_ctx_switch_bb_init(engine);
		if (err)
			goto err_ring_unpin;
	}

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_ring_unpin:
	intel_ring_unpin(ring);
err_ring:
	intel_ring_put(ring);
err_timeline_unpin:
	intel_timeline_unpin(timeline);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif