/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>

#include "gem/i915_gem_context.h"

#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "intel_workarounds.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
	unsigned int num_store_dw;
	u32 cmd, *cs;

	cmd = MI_FLUSH;
	num_store_dw = 0;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;
	if (mode & EMIT_FLUSH)
		num_store_dw = 4;

	cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	while (num_store_dw--) {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
						INTEL_GT_SCRATCH_FIELD_DEFAULT);
		*cs++ = 0;
	}
	*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;

	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;
	int i;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
			cmd |= MI_INVALIDATE_ISP;
	}

	i = 2;
	if (mode & EMIT_INVALIDATE)
		i += 20;

	cs = intel_ring_begin(rq, i);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;

	/*
	 * A random delay to let the CS invalidate take effect? Without this
	 * delay, the GPU relocation path fails as the CS does not see
	 * the updated contents. Just as important, if we apply the flushes
	 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
	 * write and before the invalidate on the next batch), the relocations
	 * still fail. This implies that it is a delay following invalidation
	 * that is required to reset the caches as opposed to a delay to
	 * ensure the memory is written.
	 */
	if (mode & EMIT_INVALIDATE) {
		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
						INTEL_GT_SCRATCH_FIELD_DEFAULT) |
			PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;

		for (i = 0; i < 12; i++)
			*cs++ = MI_FLUSH;

		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = intel_gt_scratch_offset(rq->engine->gt,
						INTEL_GT_SCRATCH_FIELD_DEFAULT) |
			PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;
	}

	*cs++ = cmd;

	intel_ring_advance(rq, cs);

	return 0;
}

/*
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	u32 *cs, flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = gen6_emit_post_sync_nonzero_flush(rq);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	/* First we do the gen6_emit_post_sync_nonzero_flush w/a */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT) |
		PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;

	/* Finally we can flush and with it emit the breadcrumb */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_CS_STALL);
	*cs++ = i915_request_active_timeline(rq)->hwsp_offset |
		PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

static int
gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
	u32 *cs, flags = 0;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/*
	 * CS_STALL suggests at least a post-sync write.
	 */
	flags |= PIPE_CONTROL_QW_WRITE;
	flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set. */
		gen7_render_ring_cs_stall_wa(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}

static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_FLUSH_ENABLE |
		 PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_GLOBAL_GTT_IVB |
		 PIPE_CONTROL_CS_STALL);
	*cs++ = i915_request_active_timeline(rq)->hwsp_offset;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

#define GEN7_XCS_WA 32
static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	int i;

	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
		MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;

	for (i = 0; i < GEN7_XCS_WA; i++) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
		*cs++ = rq->fence.seqno;
	}

	*cs++ = MI_FLUSH_DW;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#undef GEN7_XCS_WA

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (INTEL_GEN(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (INTEL_GEN(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}

static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}

static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN(engine->i915, 7)) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			/* fallthrough */
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN(engine->i915, 6)) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write(engine->uncore, hwsp, offset);
	intel_uncore_posting_read(engine->uncore, hwsp);
}

static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	drm_WARN_ON(&dev_priv->drm,
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	ENGINE_WRITE(engine, RING_INSTPM,
		     _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					INSTPM_SYNC_FLUSH));
	if (intel_wait_for_register(engine->uncore,
				    RING_INSTPM(engine->mmio_base),
				    INSTPM_SYNC_FLUSH, 0,
				    1000))
		DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
			  engine->name);
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(engine->uncore,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);

			/*
			 * Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (ENGINE_READ(engine, RING_HEAD) !=
			    ENGINE_READ(engine, RING_TAIL))
				return false;
		}
	}

	ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));

	ENGINE_WRITE(engine, RING_HEAD, 0);
	ENGINE_WRITE(engine, RING_TAIL, 0);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE(engine, RING_CTL, 0);

	return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}

static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (vm) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
		ENGINE_WRITE(engine, RING_PP_DIR_BASE,
			     px_base(ppgtt->pd)->ggtt_offset << 10);
	}
}

static int xcs_resume(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->legacy.ring;
	int ret = 0;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_DRIVER("%s head not reset to zero "
				 "ctl %08x head %08x tail %08x start %08x\n",
				 engine->name,
				 ENGINE_READ(engine, RING_CTL),
				 ENGINE_READ(engine, RING_HEAD),
				 ENGINE_READ(engine, RING_TAIL),
				 ENGINE_READ(engine, RING_START));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  ENGINE_READ(engine, RING_CTL),
				  ENGINE_READ(engine, RING_HEAD),
				  ENGINE_READ(engine, RING_TAIL),
				  ENGINE_READ(engine, RING_START));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE(engine, RING_HEAD, ring->head);
	ENGINE_WRITE(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(engine->uncore,
				    RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  ENGINE_READ(engine, RING_CTL),
			  ENGINE_READ(engine, RING_CTL) & RING_VALID,
			  ENGINE_READ(engine, RING_HEAD), ring->head,
			  ENGINE_READ(engine, RING_TAIL), ring->tail,
			  ENGINE_READ(engine, RING_START),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	if (INTEL_GEN(dev_priv) > 2)
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
out:
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	return ret;
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;

	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
	 * from a system hang if a batchbuffer is progressing when
	 * the reset is issued, regardless of READY_TO_RESET ack.
	 * Thus assume it is best to stop engines on all gens
	 * where we have a gpu reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");

	if (intel_engine_stop_cs(engine))
		ENGINE_TRACE(engine, "timed out on STOP_RING\n");

	intel_uncore_write_fw(uncore,
			      RING_HEAD(base),
			      intel_uncore_read_fw(uncore, RING_TAIL(base)));
	intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */

	intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
	intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
	intel_uncore_posting_read_fw(uncore, RING_TAIL(base));

	/* The ring must be empty before it is disabled */
	intel_uncore_write_fw(uncore, RING_CTL(base), 0);

	/* Check acts as a post */
	if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
		ENGINE_TRACE(engine, "ring head [%x] not parked\n",
			     intel_uncore_read_fw(uncore, RING_HEAD(base)));
}

static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->active.lock, flags);
	list_for_each_entry(pos, &engine->active.requests, sched.link) {
		if (!i915_request_completed(pos)) {
			rq = pos;
			break;
		}
	}

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when they lose the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanisms are
	 * safety valves if client submission ends up resulting in nothing
	 * more than subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

static int rcs_resume(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;

	/*
	 * Disable CONSTANT_BUFFER before it is loaded from the context
	 * image. For once it is loaded, it is executed and the stored
	 * address may no longer be valid, leading to a GPU hang.
	 *
	 * This imposes the requirement that userspace reload their
	 * CONSTANT_BUFFER on every batch, fortunately a requirement
	 * they are already accustomed to from before contexts were
	 * enabled.
	 */
	if (IS_GEN(i915, 4))
		intel_uncore_write(uncore, ECOSKPD,
				   _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));

	if (IS_GEN_RANGE(i915, 6, 7))
		intel_uncore_write(uncore, INSTPM,
				   _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return xcs_resume(engine);
}

static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->active.requests, sched.link) {
		i915_request_set_error_once(request, -EIO);
		i915_request_mark_complete(request);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->active.lock, flags);
}

static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH;

	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}

#define GEN5_WA_STORES 8 /* must be at least 1! */
static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	int i;

	GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH;

	BUILD_BUG_ON(GEN5_WA_STORES < 1);
	for (i = 0; i < GEN5_WA_STORES; i++) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
		*cs++ = rq->fence.seqno;
	}

	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#undef GEN5_WA_STORES

static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}

static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	engine->i915->irq_mask &= ~engine->irq_enable_mask;
	intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
	intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	engine->i915->irq_mask |= engine->irq_enable_mask;
	intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
}

static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	i915->irq_mask &= ~engine->irq_enable_mask;
	intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
	ENGINE_POSTING_READ16(engine, RING_IMR);
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	i915->irq_mask |= engine->irq_enable_mask;
	intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
}

static int
bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);
	return 0;
}
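
/*
 * From gen6 onwards each engine has its own RING_IMR in addition to the
 * GT-level interrupt mask, so enabling a user interrupt below is a two
 * step process: unmask it in the engine's IMR first and only then in the
 * GT IMR (via gen5_gt_enable_irq()).
 */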
static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR,
		     ~(engine->irq_enable_mask | engine->irq_keep_mask));

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	ENGINE_POSTING_READ(engine, RING_IMR);

	gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
	gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	ENGINE_POSTING_READ(engine, RING_IMR);

	gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	ENGINE_WRITE(engine, RING_IMR, ~0);
	gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
}

static int
i965_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
i830_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs, cs_offset =
		intel_gt_scratch_offset(rq->engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Evict the invalid PTE TLBs */
	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
	*cs++ = cs_offset;
	*cs++ = 0xdeadbeef;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		cs = intel_ring_begin(rq, 6 + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
		*cs++ = cs_offset;
		*cs++ = 4096;
		*cs++ = offset;

		*cs++ = MI_FLUSH;
		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);

		/* ... and execute it. */
		offset = cs_offset;
	}

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
			  MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}

static int
i915_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
			  MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static int __context_pin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;
	int err = 0;

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	if (engine->default_state) {
		void *defaults, *vaddr;

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_obj;
		}

		defaults = i915_gem_object_pin_map(engine->default_state,
						   I915_MAP_WB);
		if (IS_ERR(defaults)) {
			err = PTR_ERR(defaults);
			goto err_map;
		}

		memcpy(vaddr, defaults, engine->context_size);
		i915_gem_object_unpin_map(engine->default_state);

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);
	}

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
		if (engine->default_state)
			__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	}

	return 0;
}

static int ring_context_pin(struct intel_context *ce)
{
	return __context_pin_ppgtt(ce);
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.pin = ring_context_pin,
	.unpin = ring_context_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};

static int load_pd_dir(struct i915_request *rq,
		       const struct i915_ppgtt *ppgtt,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;

	/* Stall until the page table load is complete? */
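	/*
	 * Storing RING_PP_DIR_BASE back into the scratch page below
	 * presumably acts as a posting read for the two LRIs above, and the
	 * INSTPM write then asks the CS to invalidate its TLBs so that
	 * later commands use the new page directory.
	 */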
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}

static inline int mi_set_context(struct i915_request *rq,
				 struct intel_context *ce,
				 u32 flags)
{
	struct drm_i915_private *i915 = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (IS_GEN(i915, 7))
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (IS_GEN(i915, 5))
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (IS_GEN(i915, 7)) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (IS_GEN(i915, 5)) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (IS_GEN(i915, 7)) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (IS_GEN(i915, 5)) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3_slice(struct i915_request *rq, int slice)
{
	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
	 */
	ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    engine->wa_ctx.vma->node.start, 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}

static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

static int mi_flush_dw(struct i915_request *rq, u32 flags)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	cmd |= flags;

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
{
	return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
}

static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
}

static int
hsw_emit_bb_start(struct i915_request *rq,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen6_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;

	engine->park = NULL;
	engine->unpark = NULL;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	i9xx_set_default_submission(engine);
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (INTEL_GEN(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (INTEL_GEN(i915) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}

static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(INTEL_GEN(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb, so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
	if (IS_GEN(i915, 5))
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(i915) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}

static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(i915) >= 7) {
		engine->emit_flush = gen7_render_ring_flush;
		engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
	} else if (IS_GEN(i915, 6)) {
		engine->emit_flush = gen6_render_ring_flush;
		engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
	} else if (IS_GEN(i915, 5)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(i915) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->resume = rcs_resume;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN(i915, 6))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (IS_GEN(i915, 6))
			engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
		else
			engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
	} else {
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN(i915, 5))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}

static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (IS_GEN(i915, 6))
		engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
	else
		engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(INTEL_GEN(i915) < 7);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_vebox_irq_enable;
	engine->irq_disable = hsw_vebox_irq_disable;

	engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}

static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}

static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size;
	int err;
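
	/*
	 * Probe how large the clear-residuals batch needs to be, then create
	 * and pin a vma in the GT's address space and have
	 * gen7_ctx_switch_bb_setup() emit the batch into it.
	 */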
	size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (size <= 0)
		return size;

	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		goto err_private;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err_private:
	intel_context_put(vma->private);
err_obj:
	i915_gem_object_put(obj);
	return err;
}

int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	err = intel_timeline_pin(timeline);
	if (err)
		goto err_timeline;

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline_unpin;
	}

	err = intel_ring_pin(ring);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
		err = gen7_ctx_switch_bb_init(engine);
		if (err)
			goto err_ring_unpin;
	}

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_ring_unpin:
	intel_ring_unpin(ring);
err_ring:
	intel_ring_put(ring);
err_timeline_unpin:
	intel_timeline_unpin(timeline);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif