// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"

/**
 * DOC: Hardware workarounds
 *
 * Hardware workarounds are register programming documented to be executed in
 * the driver that fall outside of the normal programming sequences for a
 * platform. There are some basic categories of workarounds, depending on
 * how/when they are applied:
 *
 * - Context workarounds: workarounds that touch registers that are
 *   saved/restored to/from the HW context image. The list is emitted (via Load
 *   Register Immediate commands) once when initializing the device and saved in
 *   the default context. That default context is then used on every context
 *   creation to have a "primed golden context", i.e. a context image that
 *   already contains the changes needed to all the registers.
 *
 *   Context workarounds should be implemented in the \*_ctx_workarounds_init()
 *   variants respective to the targeted platforms.
 *
 * - Engine workarounds: the list of these WAs is applied whenever the specific
 *   engine is reset. It's also possible that a set of engine classes share a
 *   common power domain and they are reset together. This happens on some
 *   platforms with render and compute engines. In this case (at least) one of
 *   them needs to keep the workaround programming: the approach taken in the
 *   driver is to tie those workarounds to the first compute/render engine that
 *   is registered. When executing with GuC submission, engine resets are
 *   outside of kernel driver control, hence the list of registers involved is
 *   written once, on engine initialization, and then passed to GuC, which
 *   saves/restores their values before/after the reset takes place. See
 *   ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
 *
 *   Workarounds for registers specific to RCS and CCS should be implemented in
 *   rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
 *   registers belonging to BCS, VCS or VECS should be implemented in
 *   xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
 *   engine's MMIO range but that are part of the common RCS/CCS reset domain
 *   should be implemented in general_render_compute_wa_init().
 *
 * - GT workarounds: the list of these WAs is applied whenever these registers
 *   revert to their default values: on GPU reset, suspend/resume [1]_, etc.
 *
 *   GT workarounds should be implemented in the \*_gt_workarounds_init()
 *   variants respective to the targeted platforms.
 *
 * - Register whitelist: some workarounds need to be implemented in userspace,
 *   but need to touch privileged registers. The whitelist in the kernel
 *   instructs the hardware to allow the access to happen. From the kernel side,
 *   this is just a special case of a MMIO workaround (as we write the list of
 *   these to-be-whitelisted registers to some special HW registers).
 *
 *   Register whitelisting should be done in the \*_whitelist_build() variants
 *   respective to the targeted platforms.
 *
 * - Workaround batchbuffers: buffers that get executed automatically by the
 *   hardware on every HW context restore. These buffers are created and
 *   programmed in the default context so the hardware always goes through those
 *   programming sequences when switching contexts. The support for workaround
 *   batchbuffers is enabled by these hardware mechanisms:
 *
 *   #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
 *      context, pointing the hardware to jump to that location when that offset
 *      is reached in the context restore. The workaround batchbuffer in the
 *      driver currently uses this mechanism for all platforms.
 *
 *   #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
 *      pointing the hardware to a buffer to continue executing after the
 *      engine registers are restored in a context restore sequence. This is
 *      currently not used in the driver.
 *
 * - Other: There are WAs that, due to their nature, cannot be applied from a
 *   central place. Those are peppered around the rest of the code, as needed.
 *   Workarounds related to the display IP are the main example.
 *
 * .. [1] Technically, some registers are powercontext saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things, so it's the approach taken in the driver.
 */
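
/*
 * Illustrative sketch of the flow described above (SOME_REG and SOME_BIT
 * are hypothetical placeholders, not real registers): a platform's
 * *_workarounds_init() function records entries such as
 * wa_write_or(wal, SOME_REG, SOME_BIT) into an i915_wa_list, and the
 * common code below then applies the list with wa_list_apply() and
 * cross-checks it with wa_verify().
 */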

static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
			  const char *name, const char *engine_name)
{
	wal->gt = gt;
	wal->name = name;
	wal->engine_name = engine_name;
}

#define WA_LIST_CHUNK (1 << 4)

static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup(wal->list,
					       wal->count * sizeof(*list),
					       GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	drm_dbg(&wal->gt->i915->drm, "Initialized %u %s workarounds on %s\n",
		wal->wa_count, wal->name, wal->engine_name);
}
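
/*
 * Insert @wa into @wal, keeping the list sorted by register offset. The
 * backing array grows in WA_LIST_CHUNK increments, and an entry for a
 * register that is already on the list is merged into the existing one
 * (with a drm_err() when one entry overwrites another's programming).
 */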
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	struct drm_i915_private *i915 = wal->gt->i915;
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			drm_err(&i915->drm, "No space for workaround init!\n");
			return;
		}

		if (wal->list) {
			memcpy(list, wal->list, sizeof(*wa) * wal->count);
			kfree(wal->list);
		}

		wal->list = list;
	}

	/* Binary search for an existing entry for this register. */
	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
				drm_err(&i915->drm,
					"Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
					i915_mmio_reg_offset(wa_->reg),
					wa_->clr, wa_->set);

				wa_->set &= ~wa->clr;
			}

			wal->wa_count++;
			wa_->set |= wa->set;
			wa_->clr |= wa->clr;
			wa_->read |= wa->read;
			return;
		}
	}

	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

	/* Bubble the new entry down into its offset-sorted position. */
	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}

static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
		   u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
	struct i915_wa wa = {
		.reg  = reg,
		.clr  = clear,
		.set  = set,
		.read = read_mask,
		.masked_reg = masked_reg,
	};

	_wa_add(wal, &wa);
}

static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
		       u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
	struct i915_wa wa = {
		.mcr_reg = reg,
		.clr  = clear,
		.set  = set,
		.read = read_mask,
		.masked_reg = masked_reg,
		.is_mcr = 1,
	};

	_wa_add(wal, &wa);
}

static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
	wa_add(wal, reg, clear, set, clear, false);
}

static void
wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
{
	wa_mcr_add(wal, reg, clear, set, clear, false);
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, ~0, set);
}

static void
wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
{
	wa_mcr_write_clr_set(wal, reg, ~0, set);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, set, set);
}

static void
wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
{
	wa_mcr_write_clr_set(wal, reg, set, set);
}

static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
	wa_write_clr_set(wal, reg, clr, 0);
}

static void
wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
{
	wa_mcr_write_clr_set(wal, reg, clr, 0);
}

/*
 * WA operations on "masked register". A masked register has the upper 16 bits
 * documented as "masked" in b-spec. Its purpose is to allow writing to just a
 * portion of the register without a rmw: you simply write in the upper 16 bits
 * the mask of bits you are going to modify.
 *
 * The wa_masked_* family of functions already does the necessary operations to
 * calculate the mask based on the parameters passed, so the user only has to
 * provide the lower 16 bits of that register.
 */
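
/*
 * Worked example: with _MASKED_FIELD(mask, value) expanding to
 * (mask << 16 | value), enabling bit 2 (0x4) of a masked register is a
 * single write of _MASKED_BIT_ENABLE(0x4) == 0x00040004, and disabling it
 * is _MASKED_BIT_DISABLE(0x4) == 0x00040000; every other bit of the
 * register is left untouched, with no read-modify-write cycle needed.
 */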

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}

static void
wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}

static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}

static void
wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}

static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
		    u32 mask, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}

static void
wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
			u32 mask, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}

static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
		     HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
			 DOP_CLOCK_GATING_DISABLE);

	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
			 GEN8_SAMPLER_POWER_BYPASS_DIS);

	wa_masked_en(wal, HDC_CHICKEN0,
		     /* WaForceContextSaveRestoreNonCoherent:bdw */
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
		     (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN9_PBE_COMPRESSED_HASH_SELECTION);
		wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
				 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 FLOW_CONTROL_ENABLE |
			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
			 GEN9_ENABLE_YV12_BUGFIX |
			 GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	wa_masked_en(wal, CACHE_MODE_1,
		     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
		     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915))
		wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
				 GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to a safe value. Userspace
	 * is still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining the old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}
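
/*
 * Worked example for the IZ hashing tuning below: if subslice_7eu[0] == 0x4
 * (only subslice 2 of slice 0 has seven EUs), then ss = ffs(0x4) - 1 = 2
 * and vals[0] = 3 - 2 = 1 is the value programmed into the
 * GEN9_IZ_HASHING field for slice 0.
 */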
static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}
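
/*
 * Note on the pattern used for Wa_1406697149 below: the current register
 * value is sampled once, at list-creation time, and folded into the value
 * to be written, because context workarounds are applied via
 * MI_LOAD_REGISTER_IMM rather than by a read-modify-write at apply time.
 */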
static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/* Wa_1406697149 (WaDisableBankHangMode:icl) */
	wa_write(wal,
		 GEN8_L3CNTLREG,
		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
		 GEN8_ERRDETBCTRL);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* WaEnableFloatBlendOptimization:icl */
	wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
		   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
		   0 /* write-only, so skip validation */,
		   true);

	/* WaDisableGPGPUMidThreadPreemption:icl */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
			 GEN11_SAMPLER_ENABLE_HEADLESS_MSG);

	/* Wa_1604278689:icl,ehl */
	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
	wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
			 0, /* write-only register; skip validation */
			 0xFFFFFFFF);

	/* Wa_1406306137:icl,ehl */
	wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}

/*
 * These settings aren't actually workarounds, but general tuning settings that
 * need to be programmed on the dg2 platform.
 */
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
	wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
			     REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
	wa_mcr_add(wal,
		   XEHP_FF_MODE2,
		   FF_MODE2_TDS_TIMER_MASK,
		   FF_MODE2_TDS_TIMER_128,
		   0, false);
}

/*
 * These settings aren't actually workarounds, but general tuning settings that
 * need to be programmed on several platforms.
 */
static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/*
	 * Although some platforms refer to it as Wa_1604555607, we need to
	 * program it even on those that don't explicitly list that
	 * workaround.
	 *
	 * Note that the programming of this register is further modified
	 * according to the FF_MODE2 guidance given by Wa_1608008084:gen12.
	 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
	 * value when read. The default value for this register is zero for all
	 * fields and there are no bit masks. So instead of doing a RMW we
	 * should just write the TDS timer value. For the same reason read
	 * verification is ignored.
	 */
	wa_add(wal,
	       GEN12_FF_MODE2,
	       FF_MODE2_TDS_TIMER_MASK,
	       FF_MODE2_TDS_TIMER_128,
	       0, false);
}
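
/*
 * A read_mask of 0, as used above, makes wa_verify() a no-op for the
 * entry: it checks (cur ^ wa->set) & wa->read, which is then always 0, so
 * readback verification is effectively skipped for registers such as
 * FF_MODE2 that return the wrong value when read.
 */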

static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen12_ctx_gt_tuning_init(engine, wal);

	/*
	 * Wa_1409142259:tgl,dg1,adl-p
	 * Wa_1409347922:tgl,dg1,adl-p
	 * Wa_1409252684:tgl,dg1,adl-p
	 * Wa_1409217633:tgl,dg1,adl-p
	 * Wa_1409207793:tgl,dg1,adl-p
	 * Wa_1409178076:tgl,dg1,adl-p
	 * Wa_1408979724:tgl,dg1,adl-p
	 * Wa_14010443199:tgl,rkl,dg1,adl-p
	 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
	 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
	 */
	wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

	/* WaDisableGPGPUMidThreadPreemption:gen12 */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/*
	 * Wa_16011163337
	 *
	 * Like in gen12_ctx_gt_tuning_init(), read verification is ignored due
	 * to Wa_1608008084.
	 */
	wa_add(wal,
	       GEN12_FF_MODE2,
	       FF_MODE2_GS_TIMER_MASK,
	       FF_MODE2_GS_TIMER_224,
	       0, false);

	if (!IS_DG1(i915)) {
		/* Wa_1806527549 */
		wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);

		/* Wa_1606376872 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
	}
}

static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen12_ctx_workarounds_init(engine, wal);

	/* Wa_1409044764 */
	wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
		      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);

	/* Wa_22010493298 */
	wa_masked_en(wal, HIZ_CHICKEN,
		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}

static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	dg2_ctx_gt_tuning_init(engine, wal);

	/* Wa_16011186671:dg2_g11 */
	if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
		wa_mcr_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH);
		wa_mcr_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE);
	}

	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
		/* Wa_14010469329:dg2_g10 */
		wa_mcr_masked_en(wal, XEHP_COMMON_SLICE_CHICKEN3,
				 XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE);

		/*
		 * Wa_22010465075:dg2_g10
		 * Wa_22010613112:dg2_g10
		 * Wa_14010698770:dg2_g10
		 */
		wa_mcr_masked_en(wal, XEHP_COMMON_SLICE_CHICKEN3,
				 GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
	}

	/* Wa_16013271637:dg2 */
	wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
			 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

	/* Wa_14014947963:dg2 */
	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
	    IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
		wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);

	/* Wa_18018764978:dg2 */
	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_C0, STEP_FOREVER) ||
	    IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
		wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);

	/* Wa_15010599737:dg2 */
	wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);

	/* Wa_18019271663:dg2 */
	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
}

static void mtl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
		/* Wa_14014947963 */
		wa_masked_field_set(wal, VF_PREEMPTION,
				    PREEMPTION_VERTEX_COUNT, 0x4000);

		/* Wa_16013271637 */
		wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
				 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

		/* Wa_18019627453 */
		wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);

		/* Wa_18018764978 */
		wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
	}

	/* Wa_18019271663 */
	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
}

static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
					 struct i915_wa_list *wal)
{
	/*
	 * This is a "fake" workaround defined by software to ensure we
	 * maintain reliable, backward-compatible behavior for userspace with
	 * regards to how nested MI_BATCH_BUFFER_START commands are handled.
	 *
	 * The per-context setting of MI_MODE[12] determines whether the bits
	 * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
	 * in the traditional manner or whether they should instead use a new
	 * tgl+ meaning that breaks backward compatibility, but allows nesting
	 * into 3rd-level batchbuffers. When this new capability was first
	 * added in TGL, it remained off by default unless a context
	 * intentionally opted in to the new behavior. However Xe_HPG now
	 * flips this on by default and requires that we explicitly opt out if
	 * we don't want the new behavior.
	 *
	 * From a SW perspective, we want to maintain the backward-compatible
	 * behavior for userspace, so we'll apply a fake workaround to set it
	 * back to the legacy behavior on platforms where the hardware default
	 * is to break compatibility. At the moment there is no Linux
	 * userspace that utilizes third-level batchbuffers, so this will avoid
	 * userspace needing to make any changes; using the legacy meaning is
	 * the correct thing to do. If/when we have userspace consumers that
	 * want to utilize third-level batch nesting, we can provide a context
	 * parameter to allow them to opt in.
	 */
	wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
}
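
/*
 * For reference, the wa_masked_dis() above records a single LRI payload of
 * _MASKED_BIT_DISABLE(TGL_NESTED_BB_EN) == TGL_NESTED_BB_EN << 16: the
 * high half selects the bit and the low half leaves it cleared.
 */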

static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	u8 mocs;

	/*
	 * Some blitter commands do not have a field for MOCS, those
	 * commands will use the MOCS index pointed to by BLIT_CCTL.
	 * The BLIT_CCTL registers need to be programmed to un-cached.
	 */
	if (engine->class == COPY_ENGINE_CLASS) {
		mocs = engine->gt->mocs.uc_index;
		wa_write_clr_set(wal,
				 BLIT_CCTL(engine->mmio_base),
				 BLIT_CCTL_MASK,
				 BLIT_CCTL_MOCS(mocs, mocs));
	}
}

/*
 * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
 * defined by the hardware team, but it does program general context
 * registers. Adding that context register programming to the context
 * workaround list allows us to use the wa framework for proper application
 * and validation.
 */
static void
gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
			  struct i915_wa_list *wal)
{
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		fakewa_disable_nestedbb_mode(engine, wal);

	gen12_ctx_gt_mocs_init(engine, wal);
}

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	wa_init_start(wal, engine->gt, name, engine->name);

	/* Applies to all engines */
	/*
	 * Fake workarounds are not the actual workaround but
	 * programming of context registers using the workaround framework.
	 */
	if (GRAPHICS_VER(i915) >= 12)
		gen12_ctx_gt_fake_wa_init(engine, wal);

	if (engine->class != RENDER_CLASS)
		goto done;

	if (IS_METEORLAKE(i915))
		mtl_ctx_workarounds_init(engine, wal);
	else if (IS_PONTEVECCHIO(i915))
		; /* noop; none at this time */
	else if (IS_DG2(i915))
		dg2_ctx_workarounds_init(engine, wal);
	else if (IS_XEHPSDV(i915))
		; /* noop; none at this time */
	else if (IS_DG1(i915))
		dg1_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 12)
		gen12_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 11)
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 6)
		gen6_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) < 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));

done:
	wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}

int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = wa->set;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
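
/*
 * The ring contents emitted above look like this sketch of a two-entry
 * list, i.e. one (offset, value) pair per workaround, bracketed by
 * flushes:
 *
 *	MI_LOAD_REGISTER_IMM(2)
 *	<offset of reg A> <wa->set of A>
 *	<offset of reg B> <wa->set of B>
 *	MI_NOOP
 */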

static void
gen4_gt_workarounds_init(struct intel_gt *gt,
			 struct i915_wa_list *wal)
{
	/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}

static void
g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen4_gt_workarounds_init(gt, wal);

	/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
	wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}

static void
ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	g4x_gt_workarounds_init(gt, wal);

	wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}

static void
snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
}

static void
ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	wa_masked_dis(wal,
		      GEN7_COMMON_SLICE_CHICKEN1,
		      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* WaForceL3Serialization:ivb */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
}

static void
vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* WaForceL3Serialization:vlv */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
}

static void
hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);

	wa_add(wal,
	       HSW_ROW_CHICKEN3, 0,
	       _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
	       0 /* XXX does this reg exist? */, true);

	/* WaVSRefCountFullforceMissDisable:hsw */
	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
}

static void
gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
	unsigned int slice, subslice;
	u32 mcr, mcr_mask;

	GEM_BUG_ON(GRAPHICS_VER(i915) != 9);

	/*
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
	 * Before any MMIO read into slice/subslice specific registers, MCR
	 * packet control register needs to be programmed to point to any
	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
	 * This means each subsequent MMIO read will be forwarded to a
	 * specific s/ss combination, but this is OK since these registers
	 * are consistent across s/ss in almost all cases. In the rare
	 * occasions, such as INSTDONE, where this value is dependent
	 * on s/ss combo, the read should be done with read_subslice_reg.
	 */
	slice = ffs(sseu->slice_mask) - 1;
	GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
	subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
	GEM_BUG_ON(!subslice);
	subslice--;

	/*
	 * We use GEN8_MCR..() macros to calculate the |mcr| value for
	 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
	 */
	mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;

	drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);

	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
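
/*
 * Worked example for the selection above: with slice_mask == 0x1 and a
 * subslice mask of 0x6 for slice 0, the ffs() math picks slice 0 and
 * subslice 1, so the MCR selector is steered once, here, at an enabled
 * s/ss pair for all later slice/subslice-specific MMIO reads.
 */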

static void
gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	/* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
	gen9_wa_init_mcr(i915, wal);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);
}

static void
cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void __set_mcr_steering(struct i915_wa_list *wal,
			       i915_reg_t steering_reg,
			       unsigned int slice, unsigned int subslice)
{
	u32 mcr, mcr_mask;

	mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;

	wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
}

static void debug_dump_steering(struct intel_gt *gt)
{
	struct drm_printer p = drm_debug_printer("MCR Steering:");

	if (drm_debug_enabled(DRM_UT_DRIVER))
		intel_gt_mcr_report_steering(&p, gt, false);
}

static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
			 unsigned int slice, unsigned int subslice)
{
	__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);

	gt->default_steering.groupid = slice;
	gt->default_steering.instanceid = subslice;

	debug_dump_steering(gt);
}

static void
icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	unsigned int subslice;

	GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
	GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);

	/*
	 * Although a platform may have subslices, we need to always steer
	 * reads to the lowest instance that isn't fused off. When Render
	 * Power Gating is enabled, grabbing forcewake will only power up a
	 * single subslice (the "minconfig") if there isn't a real workload
	 * that needs to be run; this means that if we steer register reads to
	 * one of the higher subslices, we run the risk of reading back 0's or
	 * random garbage.
	 */
	subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));

	/*
	 * If the subslice we picked above also steers us to a valid L3 bank,
	 * then we can just rely on the default steering and won't need to
	 * worry about explicitly re-steering L3BANK reads later.
	 */
	if (gt->info.l3bank_mask & BIT(subslice))
		gt->steering_table[L3BANK] = NULL;

	__add_mcr_wa(gt, wal, 0, subslice);
}

static void
xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	unsigned long slice, subslice = 0, slice_mask = 0;
	u32 lncf_mask = 0;
	int i;

	/*
	 * On Xe_HP the steering increases in complexity. There are now several
	 * more units that require steering and we're not guaranteed to be able
	 * to find a common setting for all of them. These are:
	 * - GSLICE (fusable)
	 * - DSS (sub-unit within gslice; fusable)
	 * - L3 Bank (fusable)
	 * - MSLICE (fusable)
	 * - LNCF (sub-unit within mslice; always present if mslice is present)
	 *
	 * We'll do our default/implicit steering based on GSLICE (in the
	 * sliceid field) and DSS (in the subsliceid field). If we can
	 * find overlap between the valid MSLICE and/or LNCF values with
	 * a suitable GSLICE, then we can just re-use the default value and
	 * skip explicit steering at runtime.
	 *
	 * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
	 * a valid sliceid value. DSS steering is the only type of steering
	 * that utilizes the 'subsliceid' bits.
	 *
	 * Also note that, even though the steering domain is called "GSlice"
	 * and it is encoded in the register using the gslice format, the spec
	 * says that the combined (geometry | compute) fuse should be used to
	 * select the steering.
	 */

	/* Find the potential gslice candidates */
	slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
						       GEN_DSS_PER_GSLICE);

	/*
	 * Find the potential LNCF candidates. Either LNCF within a valid
	 * mslice is fine.
	 */
	for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
		lncf_mask |= (0x3 << (i * 2));

	/*
	 * Are there any sliceid values that work for both GSLICE and LNCF
	 * steering?
	 */
	if (slice_mask & lncf_mask) {
		slice_mask &= lncf_mask;
		gt->steering_table[LNCF] = NULL;
	}

	/* How about sliceid values that also work for MSLICE steering? */
	if (slice_mask & gt->info.mslice_mask) {
		slice_mask &= gt->info.mslice_mask;
		gt->steering_table[MSLICE] = NULL;
	}

	if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0))
		gt->steering_table[GAM] = NULL;

	slice = __ffs(slice_mask);
	subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
		GEN_DSS_PER_GSLICE;

	__add_mcr_wa(gt, wal, slice, subslice);

	/*
	 * SQIDI ranges are special because they use different steering
	 * registers than everything else we work with. On XeHP SDV and
	 * DG2-G10, any value in the steering registers will work fine since
	 * all instances are present, but DG2-G11 only has SQIDI instances at
	 * ID's 2 and 3, so we need to steer to one of those. For simplicity
	 * we'll just steer to a hardcoded "2" since that value will work
	 * everywhere.
	 */
	__set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
	__set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);

	/*
	 * On DG2, GAM registers have a dedicated steering control register
	 * and must always be programmed to a hardcoded groupid of "1."
	 */
	if (IS_DG2(gt->i915))
		__set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);
}
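
/*
 * Worked example for the candidate masks above: mslice_mask == 0x3 yields
 * lncf_mask == 0xf (two LNCF instances per mslice), so any bit of
 * slice_mask that is also set in 0xf lets the same sliceid serve both
 * GSLICE and LNCF steering at once.
 */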
On XeHP SDV and 1346 * DG2-G10, any value in the steering registers will work fine since 1347 * all instances are present, but DG2-G11 only has SQIDI instances at 1348 * ID's 2 and 3, so we need to steer to one of those. For simplicity 1349 * we'll just steer to a hardcoded "2" since that value will work 1350 * everywhere. 1351 */ 1352 __set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2); 1353 __set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2); 1354 1355 /* 1356 * On DG2, GAM registers have a dedicated steering control register 1357 * and must always be programmed to a hardcoded groupid of "1." 1358 */ 1359 if (IS_DG2(gt->i915)) 1360 __set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0); 1361 } 1362 1363 static void 1364 pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal) 1365 { 1366 unsigned int dss; 1367 1368 /* 1369 * Setup implicit steering for COMPUTE and DSS ranges to the first 1370 * non-fused-off DSS. All other types of MCR registers will be 1371 * explicitly steered. 1372 */ 1373 dss = intel_sseu_find_first_xehp_dss(>->info.sseu, 0, 0); 1374 __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE); 1375 } 1376 1377 static void 1378 icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1379 { 1380 struct drm_i915_private *i915 = gt->i915; 1381 1382 icl_wa_init_mcr(gt, wal); 1383 1384 /* WaModifyGamTlbPartitioning:icl */ 1385 wa_write_clr_set(wal, 1386 GEN11_GACB_PERF_CTRL, 1387 GEN11_HASH_CTRL_MASK, 1388 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); 1389 1390 /* Wa_1405766107:icl 1391 * Formerly known as WaCL2SFHalfMaxAlloc 1392 */ 1393 wa_write_or(wal, 1394 GEN11_LSN_UNSLCVC, 1395 GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | 1396 GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); 1397 1398 /* Wa_220166154:icl 1399 * Formerly known as WaDisCtxReload 1400 */ 1401 wa_write_or(wal, 1402 GEN8_GAMW_ECO_DEV_RW_IA, 1403 GAMW_ECO_DEV_CTX_RELOAD_DISABLE); 1404 1405 /* Wa_1406463099:icl 1406 * Formerly known as WaGamTlbPendError 1407 */ 1408 wa_write_or(wal, 1409 GAMT_CHKN_BIT_REG, 1410 GAMT_CHKN_DISABLE_L3_COH_PIPE); 1411 1412 /* 1413 * Wa_1408615072:icl,ehl (vsunit) 1414 * Wa_1407596294:icl,ehl (hsunit) 1415 */ 1416 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, 1417 VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); 1418 1419 /* Wa_1407352427:icl,ehl */ 1420 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, 1421 PSDUNIT_CLKGATE_DIS); 1422 1423 /* Wa_1406680159:icl,ehl */ 1424 wa_mcr_write_or(wal, 1425 GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE, 1426 GWUNIT_CLKGATE_DIS); 1427 1428 /* Wa_1607087056:icl,ehl,jsl */ 1429 if (IS_ICELAKE(i915) || 1430 IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) 1431 wa_write_or(wal, 1432 GEN11_SLICE_UNIT_LEVEL_CLKGATE, 1433 L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); 1434 1435 /* 1436 * This is not a documented workaround, but rather an optimization 1437 * to reduce sampler power. 1438 */ 1439 wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE); 1440 } 1441 1442 /* 1443 * Though there are per-engine instances of these registers, 1444 * they retain their value through engine resets and should 1445 * only be provided on the GT workaround list rather than 1446 * the engine-specific workaround list. 
1447 */ 1448 static void 1449 wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal) 1450 { 1451 struct intel_engine_cs *engine; 1452 int id; 1453 1454 for_each_engine(engine, gt, id) { 1455 if (engine->class != VIDEO_DECODE_CLASS || 1456 (engine->instance % 2)) 1457 continue; 1458 1459 wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base), 1460 IECPUNIT_CLKGATE_DIS); 1461 } 1462 } 1463 1464 static void 1465 gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1466 { 1467 icl_wa_init_mcr(gt, wal); 1468 1469 /* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */ 1470 wa_14011060649(gt, wal); 1471 1472 /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */ 1473 wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE); 1474 } 1475 1476 static void 1477 dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1478 { 1479 gen12_gt_workarounds_init(gt, wal); 1480 1481 /* Wa_1409420604:dg1 */ 1482 wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2, 1483 CPSSUNIT_CLKGATE_DIS); 1484 1485 /* Wa_1408615072:dg1 */ 1486 /* Empirical testing shows this register is unaffected by engine reset. */ 1487 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL); 1488 } 1489 1490 static void 1491 xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1492 { 1493 struct drm_i915_private *i915 = gt->i915; 1494 1495 xehp_init_mcr(gt, wal); 1496 1497 /* Wa_1409757795:xehpsdv */ 1498 wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB); 1499 1500 /* Wa_18011725039:xehpsdv */ 1501 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) { 1502 wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER); 1503 wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH); 1504 } 1505 1506 /* Wa_16011155590:xehpsdv */ 1507 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) 1508 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, 1509 TSGUNIT_CLKGATE_DIS); 1510 1511 /* Wa_14011780169:xehpsdv */ 1512 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) { 1513 wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | 1514 GAMTLBVDBOX7_CLKGATE_DIS | 1515 GAMTLBVDBOX6_CLKGATE_DIS | 1516 GAMTLBVDBOX5_CLKGATE_DIS | 1517 GAMTLBVDBOX4_CLKGATE_DIS | 1518 GAMTLBVDBOX3_CLKGATE_DIS | 1519 GAMTLBVDBOX2_CLKGATE_DIS | 1520 GAMTLBVDBOX1_CLKGATE_DIS | 1521 GAMTLBVDBOX0_CLKGATE_DIS | 1522 GAMTLBKCR_CLKGATE_DIS | 1523 GAMTLBGUC_CLKGATE_DIS | 1524 GAMTLBBLT_CLKGATE_DIS); 1525 wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS | 1526 GAMTLBGFXA1_CLKGATE_DIS | 1527 GAMTLBCOMPA0_CLKGATE_DIS | 1528 GAMTLBCOMPA1_CLKGATE_DIS | 1529 GAMTLBCOMPB0_CLKGATE_DIS | 1530 GAMTLBCOMPB1_CLKGATE_DIS | 1531 GAMTLBCOMPC0_CLKGATE_DIS | 1532 GAMTLBCOMPC1_CLKGATE_DIS | 1533 GAMTLBCOMPD0_CLKGATE_DIS | 1534 GAMTLBCOMPD1_CLKGATE_DIS | 1535 GAMTLBMERT_CLKGATE_DIS | 1536 GAMTLBVEBOX3_CLKGATE_DIS | 1537 GAMTLBVEBOX2_CLKGATE_DIS | 1538 GAMTLBVEBOX1_CLKGATE_DIS | 1539 GAMTLBVEBOX0_CLKGATE_DIS); 1540 } 1541 1542 /* Wa_16012725990:xehpsdv */ 1543 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER)) 1544 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS); 1545 1546 /* Wa_14011060649:xehpsdv */ 1547 wa_14011060649(gt, wal); 1548 1549 /* Wa_14012362059:xehpsdv */ 1550 wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB); 1551 1552 /* Wa_14014368820:xehpsdv */ 1553 wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL, 1554 INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE); 1555 1556 /* Wa_14010670810:xehpsdv */ 1557 wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE); 1558 } 1559 1560 static void 1561 
dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1562 { 1563 struct intel_engine_cs *engine; 1564 int id; 1565 1566 xehp_init_mcr(gt, wal); 1567 1568 /* Wa_14011060649:dg2 */ 1569 wa_14011060649(gt, wal); 1570 1571 /* 1572 * Although there are per-engine instances of these registers, 1573 * they technically exist outside the engine itself and are not 1574 * impacted by engine resets. Furthermore, they're part of the 1575 * GuC blacklist so trying to treat them as engine workarounds 1576 * will result in GuC initialization failure and a wedged GPU. 1577 */ 1578 for_each_engine(engine, gt, id) { 1579 if (engine->class != VIDEO_DECODE_CLASS) 1580 continue; 1581 1582 /* Wa_16010515920:dg2_g10 */ 1583 if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) 1584 wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base), 1585 ALNUNIT_CLKGATE_DIS); 1586 } 1587 1588 if (IS_DG2_G10(gt->i915)) { 1589 /* Wa_22010523718:dg2 */ 1590 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, 1591 CG3DDISCFEG_CLKGATE_DIS); 1592 1593 /* Wa_14011006942:dg2 */ 1594 wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE, 1595 DSS_ROUTER_CLKGATE_DIS); 1596 } 1597 1598 if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) || 1599 IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) { 1600 /* Wa_14012362059:dg2 */ 1601 wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB); 1602 } 1603 1604 if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) { 1605 /* Wa_14010948348:dg2_g10 */ 1606 wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS); 1607 1608 /* Wa_14011037102:dg2_g10 */ 1609 wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS); 1610 1611 /* Wa_14011371254:dg2_g10 */ 1612 wa_mcr_write_or(wal, XEHP_SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS); 1613 1614 /* Wa_14011431319:dg2_g10 */ 1615 wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | 1616 GAMTLBVDBOX7_CLKGATE_DIS | 1617 GAMTLBVDBOX6_CLKGATE_DIS | 1618 GAMTLBVDBOX5_CLKGATE_DIS | 1619 GAMTLBVDBOX4_CLKGATE_DIS | 1620 GAMTLBVDBOX3_CLKGATE_DIS | 1621 GAMTLBVDBOX2_CLKGATE_DIS | 1622 GAMTLBVDBOX1_CLKGATE_DIS | 1623 GAMTLBVDBOX0_CLKGATE_DIS | 1624 GAMTLBKCR_CLKGATE_DIS | 1625 GAMTLBGUC_CLKGATE_DIS | 1626 GAMTLBBLT_CLKGATE_DIS); 1627 wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS | 1628 GAMTLBGFXA1_CLKGATE_DIS | 1629 GAMTLBCOMPA0_CLKGATE_DIS | 1630 GAMTLBCOMPA1_CLKGATE_DIS | 1631 GAMTLBCOMPB0_CLKGATE_DIS | 1632 GAMTLBCOMPB1_CLKGATE_DIS | 1633 GAMTLBCOMPC0_CLKGATE_DIS | 1634 GAMTLBCOMPC1_CLKGATE_DIS | 1635 GAMTLBCOMPD0_CLKGATE_DIS | 1636 GAMTLBCOMPD1_CLKGATE_DIS | 1637 GAMTLBMERT_CLKGATE_DIS | 1638 GAMTLBVEBOX3_CLKGATE_DIS | 1639 GAMTLBVEBOX2_CLKGATE_DIS | 1640 GAMTLBVEBOX1_CLKGATE_DIS | 1641 GAMTLBVEBOX0_CLKGATE_DIS); 1642 1643 /* Wa_14010569222:dg2_g10 */ 1644 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, 1645 GAMEDIA_CLKGATE_DIS); 1646 1647 /* Wa_14011028019:dg2_g10 */ 1648 wa_mcr_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS); 1649 1650 /* Wa_14010680813:dg2_g10 */ 1651 wa_mcr_write_or(wal, XEHP_GAMSTLB_CTRL, 1652 CONTROL_BLOCK_CLKGATE_DIS | 1653 EGRESS_BLOCK_CLKGATE_DIS | 1654 TAG_BLOCK_CLKGATE_DIS); 1655 } 1656 1657 /* Wa_14014830051:dg2 */ 1658 wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN); 1659 1660 /* Wa_14015795083 */ 1661 wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE); 1662 1663 /* Wa_18018781329 */ 1664 wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB); 1665 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB); 1666 wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB); 1667 
wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB); 1668 1669 /* Wa_1509235366:dg2 */ 1670 wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL, 1671 INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE); 1672 1673 /* Wa_14010648519:dg2 */ 1674 wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE); 1675 } 1676 1677 static void 1678 pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1679 { 1680 pvc_init_mcr(gt, wal); 1681 1682 /* Wa_14015795083 */ 1683 wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE); 1684 1685 /* Wa_18018781329 */ 1686 wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB); 1687 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB); 1688 wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB); 1689 wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB); 1690 1691 /* Wa_16016694945 */ 1692 wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC); 1693 } 1694 1695 static void 1696 xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1697 { 1698 /* Wa_14018778641 / Wa_18018781329 */ 1699 wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB); 1700 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB); 1701 1702 /* Wa_22016670082 */ 1703 wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE); 1704 1705 if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) || 1706 IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0)) { 1707 /* Wa_14014830051 */ 1708 wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN); 1709 1710 /* Wa_14015795083 */ 1711 wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE); 1712 } 1713 1714 /* 1715 * Unlike older platforms, we no longer setup implicit steering here; 1716 * all MCR accesses are explicitly steered. 1717 */ 1718 debug_dump_steering(gt); 1719 } 1720 1721 static void 1722 xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) 1723 { 1724 /* 1725 * Wa_14018778641 1726 * Wa_18018781329 1727 * 1728 * Note that although these registers are MCR on the primary 1729 * GT, the media GT's versions are regular singleton registers. 1730 */ 1731 wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB); 1732 wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB); 1733 wa_write_or(wal, XELPMP_VEBX_MOD_CTRL, FORCE_MISS_FTLB); 1734 1735 debug_dump_steering(gt); 1736 } 1737 1738 /* 1739 * The bspec performance guide has recommended MMIO tuning settings. These 1740 * aren't truly "workarounds" but we want to program them through the 1741 * workaround infrastructure to make sure they're (re)applied at the proper 1742 * times. 1743 * 1744 * The programming in this function is for settings that persist through 1745 * engine resets and also are not part of any engine's register state context. 1746 * I.e., settings that only need to be re-applied in the event of a full GT 1747 * reset. 
 */
static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
{
	if (IS_PONTEVECCHIO(gt->i915)) {
		wa_mcr_write(wal, XEHPC_L3SCRUB,
			     SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
		wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
	}

	if (IS_DG2(gt->i915)) {
		wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
		wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
	}
}

static void
gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	gt_tuning_settings(gt, wal);

	if (gt->type == GT_MEDIA) {
		if (MEDIA_VER(i915) >= 13)
			xelpmp_gt_workarounds_init(gt, wal);
		else
			MISSING_CASE(MEDIA_VER(i915));

		return;
	}

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
		xelpg_gt_workarounds_init(gt, wal);
	else if (IS_PONTEVECCHIO(i915))
		pvc_gt_workarounds_init(gt, wal);
	else if (IS_DG2(i915))
		dg2_gt_workarounds_init(gt, wal);
	else if (IS_XEHPSDV(i915))
		xehpsdv_gt_workarounds_init(gt, wal);
	else if (IS_DG1(i915))
		dg1_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 12)
		gen12_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 11)
		icl_gt_workarounds_init(gt, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_gt_workarounds_init(gt, wal);
	else if (IS_GEMINILAKE(i915))
		glk_gt_workarounds_init(gt, wal);
	else if (IS_KABYLAKE(i915))
		kbl_gt_workarounds_init(gt, wal);
	else if (IS_BROXTON(i915))
		gen9_gt_workarounds_init(gt, wal);
	else if (IS_SKYLAKE(i915))
		skl_gt_workarounds_init(gt, wal);
	else if (IS_HASWELL(i915))
		hsw_gt_workarounds_init(gt, wal);
	else if (IS_VALLEYVIEW(i915))
		vlv_gt_workarounds_init(gt, wal);
	else if (IS_IVYBRIDGE(i915))
		ivb_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 6)
		snb_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 5)
		ilk_gt_workarounds_init(gt, wal);
	else if (IS_G4X(i915))
		g4x_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) == 4)
		gen4_gt_workarounds_init(gt, wal);
	else if (GRAPHICS_VER(i915) <= 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));
}

void intel_gt_init_workarounds(struct intel_gt *gt)
{
	struct i915_wa_list *wal = &gt->wa_list;

	wa_init_start(wal, gt, "GT", "global");
	gt_init_workarounds(gt, wal);
	wa_init_finish(wal);
}

static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw = 0;
	struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		fw |= intel_uncore_forcewake_for_reg(uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

	return fw;
}

static bool
wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
	  const char *name, const char *from)
{
	if ((cur ^ wa->set) & wa->read) {
		drm_err(&gt->i915->drm,
			"%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
			name, from, i915_mmio_reg_offset(wa->reg),
			cur, cur & wa->read, wa->set & wa->read);

		return false;
	}

	return true;
}

static void wa_list_apply(const struct i915_wa_list *wal)
{
	struct intel_gt *gt = wal->gt;
	struct intel_uncore *uncore = gt->uncore;
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	fw = wal_get_fw_for_rmw(uncore, wal);

	intel_gt_mcr_lock(gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 val, old = 0;

		/* open-coded rmw due to steering */
		if (wa->clr)
			old = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);
		val = (old & ~wa->clr) | wa->set;
		if (val != old || !wa->clr) {
			if (wa->is_mcr)
				intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
			else
				intel_uncore_write_fw(uncore, wa->reg, val);
		}

		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
			u32 val = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);

			wa_verify(gt, wa, val, wal->name, "application");
		}
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(gt, flags);
}

void intel_gt_apply_workarounds(struct intel_gt *gt)
{
	wa_list_apply(&gt->wa_list);
}

static bool wa_list_verify(struct intel_gt *gt,
			   const struct i915_wa_list *wal,
			   const char *from)
{
	struct intel_uncore *uncore = gt->uncore;
	struct i915_wa *wa;
	enum forcewake_domains fw;
	unsigned long flags;
	unsigned int i;
	bool ok = true;

	fw = wal_get_fw_for_rmw(uncore, wal);

	intel_gt_mcr_lock(gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg),
				wal->name, from);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(gt, flags);

	return ok;
}

bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
	return wa_list_verify(gt, &gt->wa_list, from);
}

__maybe_unused
static bool is_nonpriv_flags_valid(u32 flags)
{
	/* Check only valid flag bits are set */
	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
		return false;

	/* NB: Only 3 out of 4 enum values are valid for access field */
	if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
		return false;

	return true;
}

static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.reg = reg
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.mcr_reg = reg,
		.is_mcr = 1,
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

	wa.mcr_reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}

static void
whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
{
	whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}

static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);

	/* WaSendPushConstantsFromMMIO:skl,bxt */
	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}

static void skl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:skl */
	whitelist_mcr_reg(w, GEN8_L3SQCREG4);
}

static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);
}

static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:kbl */
	whitelist_mcr_reg(w, GEN8_L3SQCREG4);
}

static void glk_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}

static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/*
	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
	 *
	 * This covers 4 registers which are next to one another :
	 *   - PS_INVOCATION_COUNT
	 *   - PS_INVOCATION_COUNT_UDW
	 *   - PS_DEPTH_COUNT
	 *   - PS_DEPTH_COUNT_UDW
	 */
	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
			  RING_FORCE_TO_NONPRIV_RANGE_4);
}

static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		whitelist_reg_ext(w,
				  RING_CTX_TIMESTAMP(engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
}

static void cml_whitelist_build(struct intel_engine_cs *engine)
{
	allow_read_ctx_timestamp(engine);

	cfl_whitelist_build(engine);
}

static void icl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	allow_read_ctx_timestamp(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
		whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);

		/* WaAllowUMDToModifySamplerMode:icl */
		whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);

		/* WaEnableStateCacheRedirectToCS:icl */
		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
		 *
		 * This covers 4 registers which are next to one another :
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);
		break;

	case VIDEO_DECODE_CLASS:
		/* hucStatusRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucUKernelHdrInfoRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucStatus2RegOffset */
		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;

	default:
		break;
	}
}

static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	allow_read_ctx_timestamp(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
		 * Wa_1408556865:tgl
		 *
		 * This covers 4 registers which are next to one another :
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);

		/*
		 * Wa_1808121037:tgl
		 * Wa_14012131227:dg1
		 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
		 */
		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);

		/* Wa_1806527549:tgl */
		whitelist_reg(w, HIZ_CHICKEN);

		/* Required by recommended tuning setting (not a workaround) */
		whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);

		break;
	default:
		break;
	}
}

static void dg2_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/*
		 * Wa_1507100340:dg2_g10
		 *
		 * This covers 4 registers which are next to one another :
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
			whitelist_reg_ext(w, PS_INVOCATION_COUNT,
					  RING_FORCE_TO_NONPRIV_ACCESS_RD |
					  RING_FORCE_TO_NONPRIV_RANGE_4);

		/* Required by recommended tuning setting (not a workaround) */
		whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);

		break;
	case COMPUTE_CLASS:
		/* Wa_16011157294:dg2_g10 */
		if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0))
			whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
		break;
	default:
		break;
	}
}

static void blacklist_trtt(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	/*
	 * Prevent read/write access to [0x4400, 0x4600) which covers
	 * the TRTT range across all engines. Note that normally userspace
	 * cannot access the other engines' trtt control, but for simplicity
	 * we cover the entire range on each engine.
	 */
	whitelist_reg_ext(w, _MMIO(0x4400),
			  RING_FORCE_TO_NONPRIV_DENY |
			  RING_FORCE_TO_NONPRIV_RANGE_64);
	whitelist_reg_ext(w, _MMIO(0x4500),
			  RING_FORCE_TO_NONPRIV_DENY |
			  RING_FORCE_TO_NONPRIV_RANGE_64);
}

static void pvc_whitelist_build(struct intel_engine_cs *engine)
{
	/* Wa_16014440446:pvc */
	blacklist_trtt(engine);
}

static void mtl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* Required by recommended tuning setting (not a workaround) */
		whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);

		break;
	default:
		break;
	}
}

void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, engine->gt, "whitelist", engine->name);

	if (IS_METEORLAKE(i915))
		mtl_whitelist_build(engine);
	else if (IS_PONTEVECCHIO(i915))
		pvc_whitelist_build(engine);
	else if (IS_DG2(i915))
		dg2_whitelist_build(engine);
	else if (IS_XEHPSDV(i915))
		; /* none needed */
	else if (GRAPHICS_VER(i915) == 12)
		tgl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) == 11)
		icl_whitelist_build(engine);
	else if (IS_COMETLAKE(i915))
		cml_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	else if (GRAPHICS_VER(i915) <= 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));

	wa_init_finish(w);
}

void
intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}

/*
 * engine_fake_wa_init() is a placeholder for programming registers that are
 * not covered by an official workaround defined by the hardware team.
 * Routing that programming through the workaround list lets us reuse the
 * wa framework for proper application and verification of those registers.
 */
static void
engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	u8 mocs_w, mocs_r;

	/*
	 * RING_CMD_CCTL specifies the default MOCS entry that will be used
	 * by the command streamer when executing commands that don't have
	 * a way to explicitly specify a MOCS setting. The default should
	 * usually reference whichever MOCS entry corresponds to uncached
	 * behavior, although use of a WB cached entry is recommended by the
	 * spec in certain circumstances on specific platforms.
	 */
	if (GRAPHICS_VER(engine->i915) >= 12) {
		mocs_r = engine->gt->mocs.uc_index;
		mocs_w = engine->gt->mocs.uc_index;

		if (HAS_L3_CCS_READ(engine->i915) &&
		    engine->class == COMPUTE_CLASS) {
			mocs_r = engine->gt->mocs.wb_index;

			/*
			 * Even on the few platforms where MOCS 0 is a
			 * legitimate table entry, it's never the correct
			 * setting to use here; we can assume the MOCS init
			 * just forgot to initialize wb_index.
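			 * The drm_WARN_ON() below flags that case so the
			 * platform's MOCS tables can be fixed up, rather
			 * than the CS silently defaulting to entry 0.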
			 */
			drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
		}

		wa_masked_field_set(wal,
				    RING_CMD_CCTL(engine->mmio_base),
				    CMD_CCTL_MOCS_MASK,
				    CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
	}
}

static bool needs_wa_1308578152(struct intel_engine_cs *engine)
{
	return intel_sseu_find_first_xehp_dss(&engine->gt->info.sseu, 0, 0) >=
		GEN_DSS_PER_GSLICE;
}

static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
		/* Wa_22014600077 */
		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
				 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
	}

	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
	    IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
	    IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
		/* Wa_1509727124 */
		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
				 SC_DISABLE_POWER_OPTIMIZATION_EBB);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
	    IS_DG2_G11(i915) || IS_DG2_G12(i915) ||
	    IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0)) {
		/* Wa_22012856258 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
				 GEN12_DISABLE_READ_SUPPRESSION);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
		/* Wa_14013392000:dg2_g11 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
	    IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
		/* Wa_14012419201:dg2 */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4,
				 GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
	}

	/* Wa_1308578152:dg2_g10 when first gslice is fused off */
	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) &&
	    needs_wa_1308578152(engine)) {
		wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON,
			      GEN12_REPLAY_MODE_GRANULARITY);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
	    IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
		/*
		 * Wa_22010960976:dg2
		 * Wa_14013347512:dg2
		 */
		wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
				  LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
		/*
		 * Wa_1608949956:dg2_g10
		 * Wa_14010198302:dg2_g10
		 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
				 MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0))
		/* Wa_22010430635:dg2 */
		wa_mcr_masked_en(wal,
				 GEN9_ROW_CHICKEN4,
				 GEN12_DISABLE_GRF_CLEAR);

	/* Wa_14013202645:dg2 */
	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
	    IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0))
		wa_mcr_write_or(wal, RT_CTRL, DIS_NULL_QUERY);

	/* Wa_22012532006:dg2 */
	if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) ||
	    IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0))
		wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
				 DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);

	if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_B0, STEP_FOREVER) ||
	    IS_DG2_G10(i915)) {
		/* Wa_22014600077:dg2 */
		wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
			   _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
			   0 /* Wa_14012342262 write-only reg, so skip verification */,
			   true);
	}

	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);

		/*
		 * Wa_1407928979:tgl A*
		 * Wa_18011464164:tgl[B0+],dg1[B0+]
		 * Wa_22010931296:tgl[B0+],dg1[B0+]
		 * Wa_14010919138:rkl,dg1,adl-s,adl-p
		 */
		wa_write_or(wal, GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
	}

	if (IS_ALDERLAKE_P(i915) || IS_DG2(i915) || IS_ALDERLAKE_S(i915) ||
	    IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/*
		 * Wa_1606700617:tgl,dg1,adl-p
		 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
		 * Wa_14010826681:tgl,dg1,rkl,adl-p
		 * Wa_18019627453:dg2
		 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}

	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
		/* Wa_1409804808 */
		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
				 GEN12_PUSH_CONST_DEREF_HOLD_DIS);

		/* Wa_14010229206 */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
	}

	if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
		/*
		 * Wa_1607297627
		 *
		 * On TGL and RKL there are multiple entries for this WA in the
		 * BSpec; some indicate this is an A0-only WA, others indicate
		 * it applies to all steppings so we trust the "all steppings."
		 */
		wa_masked_en(wal,
			     RING_PSMI_CTL(RENDER_RING_BASE),
			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);
	}

	if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) ||
	    IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) {
		/* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
		wa_mcr_masked_en(wal,
				 GEN10_SAMPLER_MODE,
				 ENABLE_SMALLPL);
	}

	if (GRAPHICS_VER(i915) == 11) {
		/* This is not a Wa; enable for better image quality */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

		/*
		 * Wa_1405543622:icl
		 * Formerly known as WaGAPZPriorityScheme
		 */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN11_ARBITRATION_PRIO_ORDER_MASK);

		/*
		 * Wa_1604223664:icl
		 * Formerly known as WaL3BankAddressHashing
		 */
		wa_write_clr_set(wal,
				 GEN8_GARBCNTL,
				 GEN11_HASH_CTRL_EXCL_MASK,
				 GEN11_HASH_CTRL_EXCL_BIT0);
		wa_write_clr_set(wal,
				 GEN11_GLBLINVL,
				 GEN11_BANK_HASH_ADDR_EXCL_MASK,
				 GEN11_BANK_HASH_ADDR_EXCL_BIT0);

		/*
		 * Wa_1405733216:icl
		 * Formerly known as WaDisableCleanEvicts
		 */
		wa_mcr_write_or(wal,
				GEN8_L3SQCREG4,
				GEN11_LQSC_CLEAN_EVICT_DISABLE);

		/* Wa_1606682166:icl */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);

		/* Wa_1409178092:icl */
		wa_mcr_write_clr_set(wal,
				     GEN11_SCRATCH2,
				     GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
				     0);

		/* WaEnable32PlaneMode:icl */
		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
			     GEN11_ENABLE_32_PLANE_MODE);

		/*
		 * Wa_1408767742:icl[a2..forever],ehl[all]
		 * Wa_1605460711:icl[a0..c0]
		 */
		wa_write_or(wal,
			    GEN7_FF_THREAD_MODE,
			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);

		/* Wa_22010271021 */
		wa_masked_en(wal,
			     GEN9_CS_DEBUG_MODE1,
			     FF_DOP_CLOCK_GATE_DISABLE);
	}

	/*
	 * Intel platforms that support fine-grained preemption (i.e., gen9 and
	 * beyond) allow the kernel-mode driver to choose between two different
	 * options for controlling preemption granularity and behavior.
	 *
	 * Option 1 (hardware default):
	 *   Preemption settings are controlled in a global manner via
	 *   kernel-only register CS_DEBUG_MODE1 (0x20EC). Any granularity
	 *   and settings chosen by the kernel-mode driver will apply to all
	 *   userspace clients.
	 *
	 * Option 2:
	 *   Preemption settings are controlled on a per-context basis via
	 *   register CS_CHICKEN1 (0x2580). CS_CHICKEN1 is saved/restored on
	 *   context switch and is writable by userspace (e.g., via
	 *   MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer)
	 *   which allows different userspace drivers/clients to select
	 *   different settings, or to change those settings on the fly in
	 *   response to runtime needs. This option was known by the name
	 *   "FtrPerCtxtPreemptionGranularityControl" at one time, although
	 *   that name is somewhat misleading as other non-granularity
	 *   preemption settings are also impacted by this decision.
	 *
	 * On Linux, our policy has always been to let userspace drivers
	 * control preemption granularity/settings (Option 2). This was
	 * originally mandatory on gen9 to prevent ABI breakage (old gen9
	 * userspace developed before object-level preemption was enabled would
	 * not behave well if i915 were to go with Option 1 and enable that
	 * preemption in a global manner). On gen9 each context would have
	 * object-level preemption disabled by default (see
	 * WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but
	 * userspace drivers could opt-in to object-level preemption as they
	 * saw fit.
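	 *
	 * As an illustrative sketch of Option 2 in action (the exact bit is
	 * hypothetical here), a userspace batch could flip its own
	 * per-context preemption behavior with a single LRI:
	 *
	 *	MI_LOAD_REGISTER_IMM(1)
	 *	0x2580				<- CS_CHICKEN1
	 *	<masked write enabling the desired preemption bit>
	 *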
	 * For post-gen9 platforms, we continue to utilize Option 2;
	 * even though it is no longer necessary for ABI compatibility when
	 * enabling a new platform, it does ensure that userspace will be able
	 * to implement any workarounds that show up requiring temporary
	 * adjustments to preemption behavior at runtime.
	 *
	 * Notes/Workarounds:
	 *  - Wa_14015141709:  On DG2 and early steppings of MTL,
	 *      CS_CHICKEN1[0] does not disable object-level preemption as
	 *      it is supposed to (nor does CS_DEBUG_MODE1[0] if we had been
	 *      using Option 1).  Effectively this means userspace is unable
	 *      to disable object-level preemption on these platforms/steppings
	 *      despite the setting here.
	 *
	 *  - Wa_16013994831:  May require that userspace program
	 *      CS_CHICKEN1[10] when certain runtime conditions are true.
	 *      Userspace requires Option 2 to be in effect for their update of
	 *      CS_CHICKEN1[10] to be effective.
	 *
	 * Other workarounds may appear in the future that will also require
	 * Option 2 behavior to allow proper userspace implementation.
	 */
	if (GRAPHICS_VER(i915) >= 9)
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);

	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915)) {
		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN9_GAPS_TSV_CREDIT_DISABLE);
	}

	if (IS_BROXTON(i915)) {
		/* WaDisablePooledEuLoadBalancingFix:bxt */
		wa_masked_en(wal,
			     FF_SLICE_CS_CHICKEN2,
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	if (GRAPHICS_VER(i915) == 9) {
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,
			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
		wa_mcr_write_or(wal,
				BDW_SCRATCH1,
				GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
		if (IS_GEN9_LP(i915))
			wa_mcr_write_clr_set(wal,
					     GEN8_L3SQCREG1,
					     L3_PRIO_CREDITS_MASK,
					     L3_GENERAL_PRIO_CREDITS(62) |
					     L3_HIGH_PRIO_CREDITS(2));

		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
		wa_mcr_write_or(wal,
				GEN8_L3SQCREG4,
				GEN8_LQSC_FLUSH_COHERENT_LINES);

		/* Disable atomics in L3 to prevent unrecoverable hangs */
		wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
				 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4,
				     GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
		wa_mcr_write_clr_set(wal, GEN9_SCRATCH1,
				     EVICTION_PERF_FIX_ENABLE, 0);
	}

	if (IS_HASWELL(i915)) {
		/* WaSampleCChickenBitEnable:hsw */
		wa_masked_en(wal,
			     HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);

		wa_masked_dis(wal,
			      CACHE_MODE_0_GEN7,
			      /* enable HiZ Raw Stall Optimization */
			      HIZ_RAW_STALL_OPT_DISABLE);
	}

	if (IS_VALLEYVIEW(i915)) {
		/* WaDisableEarlyCull:vlv */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);

		/*
		 * WaVSThreadDispatchOverride:ivb,vlv
		 *
		 * This actually overrides the dispatch
		 * mode for all thread types.
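		 * The clr/set below forces the TS, VS and DS scheduler
		 * fields of GEN7_FF_THREAD_MODE to their "HW" mode in a
		 * single read-modify-write.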
		 */
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaPsdDispatchEnable:vlv */
		/* WaDisablePSDDualDispatchEnable:vlv */
		wa_masked_en(wal,
			     GEN7_HALF_SLICE_CHICKEN1,
			     GEN7_MAX_PS_THREAD_DEP |
			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (IS_IVYBRIDGE(i915)) {
		/* WaDisableEarlyCull:ivb */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);

		if (0) { /* causes HiZ corruption on ivb:gt1 */
			/* enable HiZ Raw Stall Optimization */
			wa_masked_dis(wal,
				      CACHE_MODE_0_GEN7,
				      HIZ_RAW_STALL_OPT_DISABLE);
		}

		/*
		 * WaVSThreadDispatchOverride:ivb,vlv
		 *
		 * This actually overrides the dispatch
		 * mode for all thread types.
		 */
		wa_write_clr_set(wal,
				 GEN7_FF_THREAD_MODE,
				 GEN7_FF_SCHED_MASK,
				 GEN7_FF_TS_SCHED_HW |
				 GEN7_FF_VS_SCHED_HW |
				 GEN7_FF_DS_SCHED_HW);

		/* WaDisablePSDDualDispatchEnable:ivb */
		if (IS_IVB_GT1(i915))
			wa_masked_en(wal,
				     GEN7_HALF_SLICE_CHICKEN1,
				     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (GRAPHICS_VER(i915) == 7) {
		/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
		wa_masked_en(wal,
			     RING_MODE_GEN7(RENDER_RING_BASE),
			     GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);

		/* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
		wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);

		/*
		 * BSpec says this must be set, even though
		 * WaDisable4x2SubspanOptimization:ivb,hsw
		 * WaDisable4x2SubspanOptimization isn't listed for VLV.
		 */
		wa_masked_en(wal,
			     CACHE_MODE_1,
			     PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
		wa_masked_field_set(wal,
				    GEN7_GT_MODE,
				    GEN6_WIZ_HASHING_MASK,
				    GEN6_WIZ_HASHING_16x4);
	}

	if (IS_GRAPHICS_VER(i915, 6, 7))
		/*
		 * We need to disable the AsyncFlip performance optimisations in
		 * order to use MI_WAIT_FOR_EVENT within the CS. It should
		 * already be programmed to '1' on all products.
		 *
		 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
		 */
		wa_masked_en(wal,
			     RING_MI_MODE(RENDER_RING_BASE),
			     ASYNC_FLIP_PERF_DISABLE);

	if (GRAPHICS_VER(i915) == 6) {
		/*
		 * Required for the hardware to program scanline values for
		 * waiting
		 * WaEnableFlushTlbInvalidationMode:snb
		 */
		wa_masked_en(wal,
			     GFX_MODE,
			     GFX_TLB_INVALIDATE_EXPLICIT);

		/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
		wa_masked_en(wal,
			     _3D_CHICKEN,
			     _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);

		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     /* WaStripsFansDisableFastClipPerformanceFix:snb */
			     _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
			     /*
			      * Bspec says:
			      * "This bit must be set if 3DSTATE_CLIP clip mode is set
			      * to normal and 3DSTATE_SF number of SF output attributes
			      * is more than 16."
			      */
			     _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);

		/*
		 * BSpec recommends 8x4 when MSAA is used,
		 * however in practice 16x4 seems fastest.
		 *
		 * Note that PS/WM thread counts depend on the WIZ hashing
		 * disable bit, which we don't touch here, but it's good
		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
		 */
		wa_masked_field_set(wal,
				    GEN6_GT_MODE,
				    GEN6_WIZ_HASHING_MASK,
				    GEN6_WIZ_HASHING_16x4);

		/* WaDisable_RenderCache_OperationalFlush:snb */
		wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);

		/*
		 * From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		wa_masked_dis(wal,
			      CACHE_MODE_0,
			      CM0_STC_EVICT_DISABLE_LRA_SNB);
	}

	if (IS_GRAPHICS_VER(i915, 4, 6))
		/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
		wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
		       0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
		       /* XXX bit doesn't stick on Broadwater */
		       IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);

	if (GRAPHICS_VER(i915) == 4)
		/*
		 * Disable CONSTANT_BUFFER before it is loaded from the context
		 * image. Once it is loaded, it is executed and the stored
		 * address may no longer be valid, leading to a GPU hang.
		 *
		 * This imposes the requirement that userspace reload their
		 * CONSTANT_BUFFER on every batch, fortunately a requirement
		 * they are already accustomed to from before contexts were
		 * enabled.
		 */
		wa_add(wal, ECOSKPD(RENDER_RING_BASE),
		       0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
		       0 /* XXX bit doesn't stick on Broadwater */,
		       true);
}

static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
	if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
		wa_write(wal,
			 RING_SEMA_WAIT_POLL(engine->mmio_base),
			 1);
	}
}

static void
ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
		/* Wa_14014999345:pvc */
		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
	}
}

/*
 * The bspec performance guide has recommended MMIO tuning settings. These
 * aren't truly "workarounds" but we want to program them with the same
 * workaround infrastructure to ensure that they're automatically added to
 * the GuC save/restore lists, re-applied at the right times, and checked for
 * any conflicting programming requested by real workarounds.
 *
 * Programming settings should be added here only if their registers are not
 * part of an engine's register state context. If a register is part of a
 * context, then any tuning settings should be programmed in an appropriate
 * function invoked by __intel_engine_init_ctx_wa().
 */
static void
add_render_compute_tuning_settings(struct drm_i915_private *i915,
				   struct i915_wa_list *wal)
{
	if (IS_DG2(i915))
		wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);

	/*
	 * This tuning setting proves beneficial only on ATS-M designs; the
	 * default "age based" setting is optimal on regular DG2 and other
	 * platforms.
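	 * Rather than checking for ATS-M explicitly, the decision is keyed
	 * off the tuning_thread_rr_after_dep flag in the device info, so a
	 * platform opts in from its device descriptor alone.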
	 */
	if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
		wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
					THREAD_EX_ARB_MODE_RR_AFTER_DEP);

	if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}

/*
 * The workarounds in this function apply to shared registers in
 * the general render reset domain that aren't tied to a
 * specific engine. Since all render+compute engines get reset
 * together, and the contents of these registers are lost during
 * the shared render domain reset, we'll define such workarounds
 * here and then add them to just a single RCS or CCS engine's
 * workaround list (whichever engine has the
 * I915_ENGINE_FIRST_RENDER_COMPUTE flag).
 */
static void
general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	add_render_compute_tuning_settings(i915, wal);

	if (GRAPHICS_VER(i915) >= 11) {
		/*
		 * This is not a Wa (although referred to as
		 * WaSetInidrectStateOverride in places); it allows
		 * applications that reference sampler states through
		 * the BindlessSamplerStateBaseAddress to have their
		 * border color relative to DynamicStateBaseAddress
		 * rather than BindlessSamplerStateBaseAddress.
		 *
		 * Otherwise SAMPLER_STATE border colors have to be
		 * copied in multiple heaps (DynamicStateBaseAddress &
		 * BindlessSamplerStateBaseAddress)
		 *
		 * BSpec: 46052
		 */
		wa_mcr_masked_en(wal,
				 GEN10_SAMPLER_MODE,
				 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
	}

	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_B0, STEP_FOREVER) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_B0, STEP_FOREVER))
		/* Wa_14017856879 */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);

	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
		/*
		 * Wa_14017066071
		 * Wa_14017654203
		 */
		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
				 MTL_DISABLE_SAMPLER_SC_OOO);

	if (IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
		/* Wa_22015279794 */
		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
				 DISABLE_PREFETCH_INTO_IC);

	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
	    IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
	    IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
		/* Wa_22013037850 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
				DISABLE_128B_EVICTION_COMMAND_UDW);
	}

	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
	    IS_PONTEVECCHIO(i915) ||
	    IS_DG2(i915)) {
		/* Wa_22014226127 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
	}

	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
	    IS_DG2(i915)) {
		/* Wa_18017747507 */
		wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
	    IS_DG2_G11(i915)) {
		/*
		 * Wa_22012826095:dg2
		 * Wa_22013059131:dg2
		 */
		wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
				     MAXREQS_PER_BANK,
				     REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
		/* Wa_22013059131:dg2 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
				FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
		/*
		 * Wa_14010918519:dg2_g10
		 *
		 * LSC_CHICKEN_BIT_0 always reads back as 0 in this stepping,
		 * so ignoring verification.
		 */
		wa_mcr_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
			   FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
			   0, false);
	}

	if (IS_XEHPSDV(i915)) {
		/* Wa_1409954639 */
		wa_mcr_masked_en(wal,
				 GEN8_ROW_CHICKEN,
				 SYSTOLIC_DOP_CLOCK_GATING_DIS);

		/* Wa_1607196519 */
		wa_mcr_masked_en(wal,
				 GEN9_ROW_CHICKEN4,
				 GEN12_DISABLE_GRF_CLEAR);

		/* Wa_14010449647:xehpsdv */
		wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
				 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
	}

	if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
		/* Wa_14015227452:dg2,pvc */
		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);

		/* Wa_16015675438:dg2,pvc */
		wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
	}

	if (IS_DG2(i915)) {
		/*
		 * Wa_16011620976:dg2_g11
		 * Wa_22015475538:dg2
		 */
		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
	}

	if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) || IS_DG2_G11(i915))
		/*
		 * Wa_22012654132
		 *
		 * Note that register 0xE420 is write-only and cannot be read
		 * back for verification on DG2 (due to Wa_14012342262), so
		 * we need to explicitly skip the readback.
		 */
		wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
			   _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
			   0 /* write-only, so skip validation */,
			   true);
}

static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	if (GRAPHICS_VER(engine->i915) < 4)
		return;

	engine_fake_wa_init(engine, wal);

	/*
	 * These are common workarounds that just need to be applied
	 * to a single RCS/CCS engine's workaround list since
	 * they're reset as part of the general render domain reset.
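	 * Only the engine flagged I915_ENGINE_FIRST_RENDER_COMPUTE carries
	 * the shared list; see general_render_compute_wa_init().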
	 */
	if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
		general_render_compute_wa_init(engine, wal);

	if (engine->class == COMPUTE_CLASS)
		ccs_engine_wa_init(engine, wal);
	else if (engine->class == RENDER_CLASS)
		rcs_engine_wa_init(engine, wal);
	else
		xcs_engine_wa_init(engine, wal);
}

void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->wa_list;

	wa_init_start(wal, engine->gt, "engine", engine->name);
	engine_init_workarounds(engine, wal);
	wa_init_finish(wal);
}

void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
	wa_list_apply(&engine->wa_list);
}

static const struct i915_range mcr_ranges_gen8[] = {
	{ .start = 0x5500, .end = 0x55ff },
	{ .start = 0x7000, .end = 0x7fff },
	{ .start = 0x9400, .end = 0x97ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xe000, .end = 0xe7ff },
	{},
};

static const struct i915_range mcr_ranges_gen12[] = {
	{ .start = 0x8150, .end = 0x815f },
	{ .start = 0x9520, .end = 0x955f },
	{ .start = 0xb100, .end = 0xb3ff },
	{ .start = 0xde80, .end = 0xe8ff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};

static const struct i915_range mcr_ranges_xehp[] = {
	{ .start = 0x4000, .end = 0x4aff },
	{ .start = 0x5200, .end = 0x52ff },
	{ .start = 0x5400, .end = 0x7fff },
	{ .start = 0x8140, .end = 0x815f },
	{ .start = 0x8c80, .end = 0x8dff },
	{ .start = 0x94d0, .end = 0x955f },
	{ .start = 0x9680, .end = 0x96ff },
	{ .start = 0xb000, .end = 0xb3ff },
	{ .start = 0xc800, .end = 0xcfff },
	{ .start = 0xd800, .end = 0xd8ff },
	{ .start = 0xdc00, .end = 0xffff },
	{ .start = 0x17000, .end = 0x17fff },
	{ .start = 0x24a00, .end = 0x24a7f },
	{},
};

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	const struct i915_range *mcr_ranges;
	int i;

	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
		mcr_ranges = mcr_ranges_xehp;
	else if (GRAPHICS_VER(i915) >= 12)
		mcr_ranges = mcr_ranges_gen12;
	else if (GRAPHICS_VER(i915) >= 8)
		mcr_ranges = mcr_ranges_gen8;
	else
		return false;

	/*
	 * Registers in these ranges are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
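	 * This is why wa_list_srm() below skips such registers when
	 * emitting MI_STORE_REGISTER_MEM commands, and why
	 * engine_wa_list_verify() ignores their result slots.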
	 */
	for (i = 0; mcr_ranges[i].start; i++)
		if (offset >= mcr_ranges[i].start &&
		    offset <= mcr_ranges[i].end)
			return true;

	return false;
}

static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->engine->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	if (GRAPHICS_VER(i915) >= 8)
		srm++;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}

static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	struct i915_gem_ww_ctx ww;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
					   wal->count * sizeof(u32));
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_engine_pm_get(ce->engine);
	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(vma->obj, &ww);
	if (err == 0)
		err = intel_context_pin_ww(ce, &ww);
	if (err)
		goto err_pm;

	err = i915_vma_pin_ww(vma, &ww, 0, 0,
			      i915_vma_is_ggtt(vma) ?
			      PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_unpin;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	if (err == 0)
		err = wa_list_srm(rq, wal, vma);

	i915_request_get(rq);
	if (err)
		i915_request_set_error_once(rq, err);
	i915_request_add(rq);

	if (err)
		goto err_rq;

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_rq;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_rq;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->engine->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_rq:
	i915_request_put(rq);
err_vma:
	i915_vma_unpin(vma);
err_unpin:
	intel_context_unpin(ce);
err_pm:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	intel_engine_pm_put(ce->engine);
	i915_vma_put(vma);
	return err;
}

int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif