/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_workarounds.h"

/**
 * DOC: Hardware workarounds
 *
 * This file is intended as a central place to implement most [1]_ of the
 * required workarounds for hardware to work as originally intended. They fall
 * in five basic categories depending on how/when they are applied:
 *
 * - Workarounds that touch registers that are saved/restored to/from the HW
 *   context image. The list is emitted (via Load Register Immediate commands)
 *   every time a new context is created.
 * - GT workarounds. The list of these WAs is applied whenever these registers
 *   revert to their default values (on GPU reset, suspend/resume [2]_, etc..).
 * - Display workarounds. The list is applied during display clock-gating
 *   initialization.
 * - Workarounds that whitelist a privileged register, so that UMDs can manage
 *   them directly. This is just a special case of a MMIO workaround (as we
 *   write the list of these to-be-whitelisted registers to some special HW
 *   registers).
 * - Workaround batchbuffers, that get executed automatically by the hardware
 *   on every HW context restore.
 *
 * .. [1] Please notice that there are other WAs that, due to their nature,
 *    cannot be applied from a central place. Those are peppered around the rest
 *    of the code, as needed.
 *
 * .. [2] Technically, some registers are power-context saved & restored, so
 *    they survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things. We can revisit this in the future.
 *
 * Layout
 * ~~~~~~
 *
 * Keep things in this file ordered by WA type, as per the above (context, GT,
 * display, register whitelist, batchbuffer). Then, inside each type, keep the
 * following order:
 *
 * - Infrastructure functions and macros
 * - WAs per platform in standard gen/chrono order
 * - Public functions to init or apply the given workaround type.
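 *
 * Example
 * ~~~~~~~
 *
 * As an illustrative sketch only (SOME_CHICKEN_REG and SOME_CHICKEN_BIT are
 * placeholder names, not definitions from i915_reg.h), a context workaround
 * that sets a masked bit is recorded with::
 *
 *	WA_SET_BIT_MASKED(SOME_CHICKEN_REG, SOME_CHICKEN_BIT);
 *
 * The entry is merged into the sorted &struct i915_wa_list and later emitted
 * or applied by the public functions at the end of each section.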
 */

static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
	wal->name = name;
	wal->engine_name = engine_name;
}

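/*
 * Lists are grown (and later trimmed in wa_init_finish()) in chunks of this
 * many entries, so allocations are always a multiple of the chunk size.
 */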
#define WA_LIST_CHUNK (1 << 4)

static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup(wal->list,
					       wal->count * sizeof(*list),
					       GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
			 wal->wa_count, wal->name, wal->engine_name);
}

static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			DRM_ERROR("No space for workaround init!\n");
			return;
		}

		if (wal->list)
			memcpy(list, wal->list, sizeof(*wa) * wal->count);

		wal->list = list;
	}

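	/*
	 * Binary-search the sorted list: if this register already has an
	 * entry, merge the new bits into it instead of adding a duplicate.
	 */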
	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			if ((wa->mask & ~wa_->mask) == 0) {
				DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
					  i915_mmio_reg_offset(wa_->reg),
					  wa_->mask, wa_->val);

				wa_->val &= ~wa->mask;
			}

			wal->wa_count++;
			wa_->val |= wa->val;
			wa_->mask |= wa->mask;
			wa_->read |= wa->read;
			return;
		}
	}

	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

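	/* Keep the list sorted by ascending register offset. */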
	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}

static void
wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
		   u32 val)
{
	struct i915_wa wa = {
		.reg  = reg,
		.mask = mask,
		.val  = val,
		.read = mask,
	};

	_wa_add(wal, &wa);
}

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, ~0, val);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_write_masked_or(wal, reg, val, val);
}

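/*
 * Convenience macros for "masked" registers, where the upper 16 bits act as
 * a per-bit write enable for the lower 16: _MASKED_BIT_ENABLE(x) expands to
 * roughly (x << 16 | x), so a write only touches the named bits.
 */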
#define WA_SET_BIT_MASKED(addr, mask) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN9_PBE_COMPRESSED_HASH_SELECTION);
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  FLOW_CONTROL_ENABLE |
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
			  GEN9_ENABLE_YV12_BUGFIX |
			  GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(CACHE_MODE_1,
			  GEN8_4x4_STC_OPTIMIZATION_DISABLE |
			  GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to a safe value. Userspace
	 * is still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}

static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaForceContextSaveRestoreNonCoherent:cnl */
	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);

	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);

	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
	if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);

	/* WaPushConstantDereferenceHoldDisable:cnl */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);

	/* FtrEnableFastAnisoL1BankingFix:cnl */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);

	/* WaDisable3DMidCmdPreemption:cnl */
	WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:cnl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaDisableEarlyEOT:cnl */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
}

static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaDisableBankHangMode:icl */
	wa_write(wal,
		 GEN8_L3CNTLREG,
		 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
		 GEN8_ERRDETBCTRL);

	/* Wa_1604370585:icl (pre-prod)
	 * Formerly known as WaPushConstantDereferenceHoldDisable
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
				  PUSH_CONSTANT_DEREF_DISABLE);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* Wa_2006611047:icl (pre-prod)
	 * Formerly known as WaDisableImprovedTdlClkGating
	 */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
				  GEN11_TDL_CLOCK_GATING_FIX_DISABLE);

	/* Wa_2006665173:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
				  GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);

	/* WaEnableFloatBlendOptimization:icl */
	wa_write_masked_or(wal,
			   GEN10_CACHE_MODE_SS,
			   0, /* write-only, so skip validation */
			   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));

	/* WaDisableGPGPUMidThreadPreemption:icl */
	WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
			  GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
}

static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/* Wa_1409142259:tgl */
	WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
			  GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
}

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class != RENDER_CLASS)
		return;

	wa_init_start(wal, name, engine->name);

	if (IS_GEN(i915, 12))
		tgl_ctx_workarounds_init(engine, wal);
	else if (IS_GEN(i915, 11))
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (INTEL_GEN(i915) < 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}

int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

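	/*
	 * A single LRI packet: one header dword plus a (reg, value) pair
	 * per entry, padded with a NOOP to keep the emission an even
	 * number of dwords.
	 */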
	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = wa->val;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaInPlaceDecompressionHang:bxt */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);
}

static void
cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(i915, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void
wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	unsigned int slice, subslice;
	u32 l3_en, mcr, mcr_mask;

	GEM_BUG_ON(INTEL_GEN(i915) < 10);

	/*
	 * WaProgramMgsrForL3BankSpecificMmioReads:cnl,icl
	 * L3Banks could be fused off in single slice scenario. If that is
	 * the case, we might need to program MCR select to a valid L3Bank
	 * by default, to make sure we correctly read certain registers
	 * later on (in the range 0xB100 - 0xB3FF).
	 *
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
	 * Before any MMIO read into slice/subslice specific registers, MCR
	 * packet control register needs to be programmed to point to any
	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
	 * This means each subsequent MMIO read will be forwarded to a
	 * specific s/ss combination, but this is OK since these registers
	 * are consistent across s/ss in almost all cases. On the rare
	 * occasions, such as INSTDONE, where this value is dependent
	 * on s/ss combo, the read should be done with read_subslice_reg.
	 *
	 * Since GEN8_MCR_SELECTOR contains dual-purpose bits which select both
	 * to which subslice, or to which L3 bank, the respective mmio reads
	 * will go, we have to find a common index which works for both
	 * accesses.
	 *
	 * The case where we cannot find a common index fortunately should not
	 * happen in production hardware, so we only emit a warning instead of
	 * implementing something more complex that requires checking the range
	 * of every MMIO read.
	 */

	if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
		u32 l3_fuse =
			intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
			GEN10_L3BANK_MASK;

		DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse);
		l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
	} else {
		l3_en = ~0;
	}

	slice = fls(sseu->slice_mask) - 1;
	subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
	if (!subslice) {
		DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
			 intel_sseu_get_subslices(sseu, slice), l3_en);
		subslice = fls(l3_en);
		WARN_ON(!subslice);
	}
	subslice--;

	if (INTEL_GEN(i915) >= 11) {
		mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
	} else {
		mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
	}

	DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);

	wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}

static void
cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);

	/* WaInPlaceDecompressionHang:cnl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	wa_init_mcr(i915, wal);

	/* WaInPlaceDecompressionHang:icl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);

	/* WaModifyGamTlbPartitioning:icl */
	wa_write_masked_or(wal,
			   GEN11_GACB_PERF_CTRL,
			   GEN11_HASH_CTRL_MASK,
			   GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

	/* Wa_1405766107:icl
	 * Formerly known as WaCL2SFHalfMaxAlloc
	 */
	wa_write_or(wal,
		    GEN11_LSN_UNSLCVC,
		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

	/* Wa_220166154:icl
	 * Formerly known as WaDisCtxReload
	 */
	wa_write_or(wal,
		    GEN8_GAMW_ECO_DEV_RW_IA,
		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

	/* Wa_1405779004:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
		wa_write_or(wal,
			    SLICE_UNIT_LEVEL_CLKGATE,
			    MSCUNIT_CLKGATE_DIS);

	/* Wa_1406680159:icl */
	wa_write_or(wal,
		    SUBSLICE_UNIT_LEVEL_CLKGATE,
		    GWUNIT_CLKGATE_DIS);

	/* Wa_1406838659:icl (pre-prod) */
	if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
		wa_write_or(wal,
			    INF_UNIT_LEVEL_CLKGATE,
			    CGPSF_CLKGATE_DIS);

	/* Wa_1406463099:icl
	 * Formerly known as WaGamTlbPendError
	 */
	wa_write_or(wal,
		    GAMT_CHKN_BIT_REG,
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
}

static void
tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
}

static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	if (IS_GEN(i915, 12))
		tgl_gt_workarounds_init(i915, wal);
	else if (IS_GEN(i915, 11))
		icl_gt_workarounds_init(i915, wal);
	else if (IS_CANNONLAKE(i915))
		cnl_gt_workarounds_init(i915, wal);
	else if (IS_COFFEELAKE(i915))
		cfl_gt_workarounds_init(i915, wal);
	else if (IS_GEMINILAKE(i915))
		glk_gt_workarounds_init(i915, wal);
	else if (IS_KABYLAKE(i915))
		kbl_gt_workarounds_init(i915, wal);
	else if (IS_BROXTON(i915))
		bxt_gt_workarounds_init(i915, wal);
	else if (IS_SKYLAKE(i915))
		skl_gt_workarounds_init(i915, wal);
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));
}

void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
	struct i915_wa_list *wal = &i915->gt_wa_list;

	wa_init_start(wal, "GT", "global");
	gt_init_workarounds(i915, wal);
	wa_init_finish(wal);
}

static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw = 0;
	struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		fw |= intel_uncore_forcewake_for_reg(uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

	return fw;
}

static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
	if ((cur ^ wa->val) & wa->read) {
		DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
			  name, from, i915_mmio_reg_offset(wa->reg),
			  cur, cur & wa->read,
			  wa->val, wa->mask);

		return false;
	}

	return true;
}

static void
wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	fw = wal_get_fw_for_rmw(uncore, wal);

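	/*
	 * Grab all the needed forcewake domains once, under the uncore lock,
	 * so the whole list can be applied with the raw (pre-woken) mmio
	 * accessors in a single critical section.
	 */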
	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
			wa_verify(wa,
				  intel_uncore_read_fw(uncore, wa->reg),
				  wal->name, "application");
	}

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);
}

void intel_gt_apply_workarounds(struct intel_gt *gt)
{
	wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
}

static bool wa_list_verify(struct intel_uncore *uncore,
			   const struct i915_wa_list *wal,
			   const char *from)
{
	struct i915_wa *wa;
	unsigned int i;
	bool ok = true;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		ok &= wa_verify(wa,
				intel_uncore_read(uncore, wa->reg),
				wal->name, from);

	return ok;
}

bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
	return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
}

static inline bool is_nonpriv_flags_valid(u32 flags)
{
	/* Check only valid flag bits are set */
	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
		return false;

	/* NB: Only 3 out of 4 enum values are valid for access field */
	if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
	    RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
		return false;

	return true;
}

static void
whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
{
	struct i915_wa wa = {
		.reg = reg
	};

	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
		return;

	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
		return;

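	/*
	 * The RING_FORCE_TO_NONPRIV slots encode the access mode and range
	 * in otherwise unused bits of the register offset, so fold the flags
	 * straight into the address we store.
	 */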
	wa.reg.reg |= flags;
	_wa_add(wal, &wa);
}

static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}

static void gen9_whitelist_build(struct i915_wa_list *w)
{
	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);

	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
	whitelist_reg(w, GEN8_CS_CHICKEN1);

	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
	whitelist_reg(w, GEN8_HDC_CHICKEN1);

	/* WaSendPushConstantsFromMMIO:skl,bxt */
	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
}

static void skl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:skl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void bxt_whitelist_build(struct intel_engine_cs *engine)
{
	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(&engine->whitelist);
}

static void kbl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WaDisableLSQCROPERFforOCL:kbl */
	whitelist_reg(w, GEN8_L3SQCREG4);
}

static void glk_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}

static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	gen9_whitelist_build(w);

	/*
	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
	 *
	 * This covers 4 registers which are next to one another:
	 *   - PS_INVOCATION_COUNT
	 *   - PS_INVOCATION_COUNT_UDW
	 *   - PS_DEPTH_COUNT
	 *   - PS_DEPTH_COUNT_UDW
	 */
	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
			  RING_FORCE_TO_NONPRIV_RANGE_4);
}

static void cnl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	if (engine->class != RENDER_CLASS)
		return;

	/* WaEnablePreemptionGranularityControlByUMD:cnl */
	whitelist_reg(w, GEN8_CS_CHICKEN1);
}

static void icl_whitelist_build(struct intel_engine_cs *engine)
{
	struct i915_wa_list *w = &engine->whitelist;

	switch (engine->class) {
	case RENDER_CLASS:
		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
		whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);

		/* WaAllowUMDToModifySamplerMode:icl */
		whitelist_reg(w, GEN10_SAMPLER_MODE);

		/* WaEnableStateCacheRedirectToCS:icl */
		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);

		/*
		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
		 *
		 * This covers 4 registers which are next to one another:
		 *   - PS_INVOCATION_COUNT
		 *   - PS_INVOCATION_COUNT_UDW
		 *   - PS_DEPTH_COUNT
		 *   - PS_DEPTH_COUNT_UDW
		 */
		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
				  RING_FORCE_TO_NONPRIV_RANGE_4);
		break;

	case VIDEO_DECODE_CLASS:
		/* hucStatusRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucUKernelHdrInfoRegOffset */
		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		/* hucStatus2RegOffset */
		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
		break;

	default:
		break;
	}
}

static void tgl_whitelist_build(struct intel_engine_cs *engine)
{
}

void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_wa_list *w = &engine->whitelist;

	wa_init_start(w, "whitelist", engine->name);

	if (IS_GEN(i915, 12))
		tgl_whitelist_build(engine);
	else if (IS_GEN(i915, 11))
		icl_whitelist_build(engine);
	else if (IS_CANNONLAKE(i915))
		cnl_whitelist_build(engine);
	else if (IS_COFFEELAKE(i915))
		cfl_whitelist_build(engine);
	else if (IS_GEMINILAKE(i915))
		glk_whitelist_build(engine);
	else if (IS_KABYLAKE(i915))
		kbl_whitelist_build(engine);
	else if (IS_BROXTON(i915))
		bxt_whitelist_build(engine);
	else if (IS_SKYLAKE(i915))
		skl_whitelist_build(engine);
	else if (INTEL_GEN(i915) <= 8)
		return;
	else
		MISSING_CASE(INTEL_GEN(i915));

	wa_init_finish(w);
}

void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
	const struct i915_wa_list *wal = &engine->whitelist;
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	struct i915_wa *wa;
	unsigned int i;

	if (!wal->count)
		return;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(wa->reg));

	/* And clear the rest just in case of garbage */
	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
		intel_uncore_write(uncore,
				   RING_FORCE_TO_NONPRIV(base, i),
				   i915_mmio_reg_offset(RING_NOPID(base)));
}

static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (IS_GEN(i915, 11)) {
		/* This is not a Wa. Enable for better image quality */
		wa_masked_en(wal,
			     _3D_CHICKEN3,
			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);

		/* WaPipelineFlushCoherentLines:icl */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN8_LQSC_FLUSH_COHERENT_LINES);

		/*
		 * Wa_1405543622:icl
		 * Formerly known as WaGAPZPriorityScheme
		 */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN11_ARBITRATION_PRIO_ORDER_MASK);

		/*
		 * Wa_1604223664:icl
		 * Formerly known as WaL3BankAddressHashing
		 */
		wa_write_masked_or(wal,
				   GEN8_GARBCNTL,
				   GEN11_HASH_CTRL_EXCL_MASK,
				   GEN11_HASH_CTRL_EXCL_BIT0);
		wa_write_masked_or(wal,
				   GEN11_GLBLINVL,
				   GEN11_BANK_HASH_ADDR_EXCL_MASK,
				   GEN11_BANK_HASH_ADDR_EXCL_BIT0);

		/*
		 * Wa_1405733216:icl
		 * Formerly known as WaDisableCleanEvicts
		 */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN11_LQSC_CLEAN_EVICT_DISABLE);

		/* WaForwardProgressSoftReset:icl */
		wa_write_or(wal,
			    GEN10_SCRATCH_LNCF2,
			    PMFLUSHDONE_LNICRSDROP |
			    PMFLUSH_GAPL3UNBLOCK |
			    PMFLUSHDONE_LNEBLK);

		/* Wa_1406609255:icl (pre-prod) */
		if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
			wa_write_or(wal,
				    GEN7_SARCHKMD,
				    GEN7_DISABLE_DEMAND_PREFETCH);

		/* Wa_1606682166:icl */
		wa_write_or(wal,
			    GEN7_SARCHKMD,
			    GEN7_DISABLE_SAMPLER_PREFETCH);

		/* Wa_1409178092:icl */
		wa_write_masked_or(wal,
				   GEN11_SCRATCH2,
				   GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
				   0);
	}

	if (IS_GEN_RANGE(i915, 9, 11)) {
		/* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
		wa_masked_en(wal,
			     GEN7_FF_SLICE_CS_CHICKEN1,
			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
	}

	if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
		wa_write_or(wal,
			    GEN8_GARBCNTL,
			    GEN9_GAPS_TSV_CREDIT_DISABLE);
	}

	if (IS_BROXTON(i915)) {
		/* WaDisablePooledEuLoadBalancingFix:bxt */
		wa_masked_en(wal,
			     FF_SLICE_CS_CHICKEN2,
			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
	}

	if (IS_GEN(i915, 9)) {
		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
		wa_masked_en(wal,
			     GEN9_CSFE_CHICKEN1_RCS,
			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);

		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
		wa_write_or(wal,
			    BDW_SCRATCH1,
			    GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
		if (IS_GEN9_LP(i915))
			wa_write_masked_or(wal,
					   GEN8_L3SQCREG1,
					   L3_PRIO_CREDITS_MASK,
					   L3_GENERAL_PRIO_CREDITS(62) |
					   L3_HIGH_PRIO_CREDITS(2));

		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
		wa_write_or(wal,
			    GEN8_L3SQCREG4,
			    GEN8_LQSC_FLUSH_COHERENT_LINES);
	}
}

static void
xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/* WaKBLVECSSemaphoreWaitPoll:kbl */
	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
		wa_write(wal,
			 RING_SEMA_WAIT_POLL(engine->mmio_base),
			 1);
	}
}

static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
	if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
		return;

	if (engine->class == RENDER_CLASS)
		rcs_engine_wa_init(engine, wal);
	else
		xcs_engine_wa_init(engine, wal);
}

void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
	struct i915_wa_list *wal = &engine->wa_list;

	if (INTEL_GEN(engine->i915) < 8)
		return;

	wa_init_start(wal, "engine", engine->name);
	engine_init_workarounds(engine, wal);
	wa_init_finish(wal);
}

void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
	wa_list_apply(engine->uncore, &engine->wa_list);
}

static struct i915_vma *
create_scratch(struct i915_address_space *vm, int count)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int size;
	int err;

	size = round_up(count * sizeof(u32), PAGE_SIZE);
	obj = i915_gem_object_create_internal(vm->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
		return true;

	return false;
}

static int
wa_list_srm(struct i915_request *rq,
	    const struct i915_wa_list *wal,
	    struct i915_vma *vma)
{
	struct drm_i915_private *i915 = rq->i915;
	unsigned int i, count = 0;
	const struct i915_wa *wa;
	u32 srm, *cs;

	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
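	/* Gen8+ takes a 64b address, lengthening the command by one dword. */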
	if (INTEL_GEN(i915) >= 8)
		srm++;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
			count++;
	}

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 offset = i915_mmio_reg_offset(wa->reg);

		if (mcr_range(i915, offset))
			continue;

		*cs++ = srm;
		*cs++ = offset;
		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
		*cs++ = 0;
	}
	intel_ring_advance(rq, cs);

	return 0;
}

static int engine_wa_list_verify(struct intel_context *ce,
				 const struct i915_wa_list * const wal,
				 const char *from)
{
	const struct i915_wa *wa;
	struct i915_request *rq;
	struct i915_vma *vma;
	unsigned int i;
	u32 *results;
	int err;

	if (!wal->count)
		return 0;

	vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_vma;
	}

	err = wa_list_srm(rq, wal, vma);
	if (err)
		goto err_vma;

	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		err = -ETIME;
		goto err_vma;
	}

	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(results)) {
		err = PTR_ERR(results);
		goto err_vma;
	}

	err = 0;
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
			continue;

		if (!wa_verify(wa, results[i], wal->name, from))
			err = -ENXIO;
	}

	i915_gem_object_unpin_map(vma->obj);

err_vma:
	i915_vma_unpin(vma);
	i915_vma_put(vma);
	return err;
}

int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
				    const char *from)
{
	return engine_wa_list_verify(engine->kernel_context,
				     &engine->wa_list,
				     from);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_workarounds.c"
#endif