// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_gt_mcr.h"
#include "intel_gt_regs.h"

/**
 * DOC: GT Multicast/Replicated (MCR) Register Support
 *
 * Some GT registers are designed as "multicast" or "replicated" registers:
 * multiple instances of the same register share a single MMIO offset.  MCR
 * registers are generally used when the hardware needs to potentially track
 * independent values of a register per hardware unit (e.g., per-subslice,
 * per-L3bank, etc.).  The specific types of replication that exist vary
 * per-platform.
 *
 * MMIO accesses to MCR registers are controlled according to the settings
 * programmed in the platform's MCR_SELECTOR register(s).  MMIO writes to MCR
 * registers can be done in either a multicast manner (i.e., a single write
 * updates all instances of the register to the same value) or a unicast
 * manner (i.e., a write updates only one specific instance).  Reads of MCR
 * registers always operate in a unicast manner regardless of how the
 * multicast/unicast bit is set in MCR_SELECTOR.  Selection of a specific
 * MCR instance for unicast operations is referred to as "steering."
 *
 * If MCR register operations are steered toward a hardware unit that is
 * fused off or currently powered down due to power gating, the MMIO operation
 * is "terminated" by the hardware.  Terminated read operations will return a
 * value of zero and terminated unicast write operations will be silently
 * ignored.
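 *
 * For example, a caller might read one steered instance of an MCR register
 * and then broadcast a new value to every instance (a hypothetical sketch;
 * the register and bit names are illustrative, not real definitions)::
 *
 *	u32 val;
 *
 *	val = intel_gt_mcr_read(gt, MCR_EXAMPLE_REG, group, instance);
 *	intel_gt_mcr_multicast_write(gt, MCR_EXAMPLE_REG, val | EXAMPLE_BIT);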
 */

#define HAS_MSLICE_STEERING(dev_priv)	(INTEL_INFO(dev_priv)->has_mslice_steering)

static const char * const intel_steering_types[] = {
	"L3BANK",
	"MSLICE",
	"LNCF",
	"GAM",
	"DSS",
	"OADDRM",
	"INSTANCE 0",
};

static const struct intel_mmio_range icl_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

/*
 * Although the bspec lists more "MSLICE" ranges than shown here, some of those
 * are of a "GAM" subclass that has special rules.  Thus we use a separate
 * GAM table farther down for those.
 */
static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
	{ 0x00DD00, 0x00DDFF },
	{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
	{},
};

static const struct intel_mmio_range xehpsdv_gam_steering_table[] = {
	{ 0x004000, 0x004AFF },
	{ 0x00C800, 0x00CFFF },
	{},
};

static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
	{ 0x00B000, 0x00B0FF },
	{ 0x00D800, 0x00D8FF },
	{},
};

static const struct intel_mmio_range dg2_lncf_steering_table[] = {
	{ 0x00B000, 0x00B0FF },
	{ 0x00D880, 0x00D8FF },
	{},
};

/*
 * We have several types of MCR registers on PVC where steering to (0,0)
 * will always provide us with a non-terminated value.  We'll stick them
 * all in the same table for simplicity.
 */
static const struct intel_mmio_range pvc_instance0_steering_table[] = {
	{ 0x004000, 0x004AFF },		/* HALF-BSLICE */
	{ 0x008800, 0x00887F },		/* CC */
	{ 0x008A80, 0x008AFF },		/* TILEPSMI */
	{ 0x00B000, 0x00B0FF },		/* HALF-BSLICE */
	{ 0x00B100, 0x00B3FF },		/* L3BANK */
	{ 0x00C800, 0x00CFFF },		/* HALF-BSLICE */
	{ 0x00D800, 0x00D8FF },		/* HALF-BSLICE */
	{ 0x00DD00, 0x00DDFF },		/* BSLICE */
	{ 0x00E900, 0x00E9FF },		/* HALF-BSLICE */
	{ 0x00EC00, 0x00EEFF },		/* HALF-BSLICE */
	{ 0x00F000, 0x00FFFF },		/* HALF-BSLICE */
	{ 0x024180, 0x0241FF },		/* HALF-BSLICE */
	{},
};

static const struct intel_mmio_range xelpg_instance0_steering_table[] = {
	{ 0x000B00, 0x000BFF },		/* SQIDI */
	{ 0x001000, 0x001FFF },		/* SQIDI */
	{ 0x004000, 0x0048FF },		/* GAM */
	{ 0x008700, 0x0087FF },		/* SQIDI */
	{ 0x00B000, 0x00B0FF },		/* NODE */
	{ 0x00C800, 0x00CFFF },		/* GAM */
	{ 0x00D880, 0x00D8FF },		/* NODE */
	{ 0x00DD00, 0x00DDFF },		/* OAAL2 */
	{},
};

static const struct intel_mmio_range xelpg_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

/* DSS steering is used for SLICE ranges as well */
static const struct intel_mmio_range xelpg_dss_steering_table[] = {
	{ 0x005200, 0x0052FF },		/* SLICE */
	{ 0x005500, 0x007FFF },		/* SLICE */
	{ 0x008140, 0x00815F },		/* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F },		/* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF },		/* DSS */
	{ 0x00D800, 0x00D87F },		/* SLICE */
	{ 0x00DC00, 0x00DCFF },		/* SLICE */
	{ 0x00DE80, 0x00E8FF },		/* DSS (0xE000-0xE0FF reserved) */
	{},
};

static const struct intel_mmio_range xelpmp_oaddrm_steering_table[] = {
	{ 0x393200, 0x39323F },
	{ 0x393400, 0x3934FF },
	{},
};

void intel_gt_mcr_init(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned long fuse;
	int i;

	/*
	 * An mslice is unavailable only if both the meml3 for the slice is
	 * disabled *and* all of the DSS in the slice (quadrant) are disabled.
	 */
	if (HAS_MSLICE_STEERING(i915)) {
		gt->info.mslice_mask =
			intel_slicemask_from_xehp_dssmask(gt->info.sseu.subslice_mask,
							  GEN_DSS_PER_MSLICE);
		gt->info.mslice_mask |=
			(intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
			 GEN12_MEML3_EN_MASK);

		if (!gt->info.mslice_mask) /* should be impossible! */
			drm_warn(&i915->drm, "mslice mask all zero!\n");
	}

	if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
		gt->steering_table[OADDRM] = xelpmp_oaddrm_steering_table;
	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
		fuse = REG_FIELD_GET(GT_L3_EXC_MASK,
				     intel_uncore_read(gt->uncore, XEHP_FUSE4));

		/*
		 * Despite the register field being named "exclude mask" the
		 * bits actually represent enabled banks (two banks per bit).
		 */
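		/*
		 * A worked example (illustrative): fuse = 0b101 enables bank
		 * pairs {0,1} and {4,5}, yielding l3bank_mask = 0b110011.
		 */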
		for_each_set_bit(i, &fuse, 3)
			gt->info.l3bank_mask |= 0x3 << 2 * i;

		gt->steering_table[INSTANCE0] = xelpg_instance0_steering_table;
		gt->steering_table[L3BANK] = xelpg_l3bank_steering_table;
		gt->steering_table[DSS] = xelpg_dss_steering_table;
	} else if (IS_PONTEVECCHIO(i915)) {
		gt->steering_table[INSTANCE0] = pvc_instance0_steering_table;
	} else if (IS_DG2(i915)) {
		gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
		gt->steering_table[LNCF] = dg2_lncf_steering_table;
		/*
		 * No need to hook up the GAM table since it has a dedicated
		 * steering control register on DG2 and can use implicit
		 * steering.
		 */
	} else if (IS_XEHPSDV(i915)) {
		gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
		gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
		gt->steering_table[GAM] = xehpsdv_gam_steering_table;
	} else if (GRAPHICS_VER(i915) >= 11 &&
		   GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
		gt->steering_table[L3BANK] = icl_l3bank_steering_table;
		gt->info.l3bank_mask =
			~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
			GEN10_L3BANK_MASK;
		if (!gt->info.l3bank_mask) /* should be impossible! */
			drm_warn(&i915->drm, "L3 bank mask is all zero!\n");
	} else if (GRAPHICS_VER(i915) >= 11) {
		/*
		 * We expect all modern platforms to have at least some
		 * type of steering that needs to be initialized.
		 */
		MISSING_CASE(INTEL_INFO(i915)->platform);
	}
}

/*
 * Although the rest of the driver should use MCR-specific functions to
 * read/write MCR registers, we still use the regular intel_uncore_* functions
 * internally to implement those, so we need a way for the functions in this
 * file to "cast" an i915_mcr_reg_t into an i915_reg_t.
 */
static i915_reg_t mcr_reg_cast(const i915_mcr_reg_t mcr)
{
	i915_reg_t r = { .reg = mcr.reg };

	return r;
}

/*
 * rw_with_mcr_steering_fw - Access a register with specific MCR steering
 * @uncore: pointer to struct intel_uncore
 * @reg: register being accessed
 * @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
 * @group: group number (documented as "sliceid" on older platforms)
 * @instance: instance number (documented as "subsliceid" on older platforms)
 * @value: register value to be written (ignored for read)
 *
 * Return: 0 for write access, the register value for read access.
 *
 * Caller needs to make sure the relevant forcewake wells are up.
 */
static u32 rw_with_mcr_steering_fw(struct intel_uncore *uncore,
				   i915_mcr_reg_t reg, u8 rw_flag,
				   int group, int instance, u32 value)
{
	u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;

	lockdep_assert_held(&uncore->lock);

	if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70)) {
		/*
		 * Always leave the hardware in multicast mode when doing reads
		 * (see comment about Wa_22013088509 below) and only change it
		 * to unicast mode when doing writes of a specific instance.
		 *
		 * No need to save old steering reg value.
		 */
		intel_uncore_write_fw(uncore, MTL_MCR_SELECTOR,
				      REG_FIELD_PREP(MTL_MCR_GROUPID, group) |
				      REG_FIELD_PREP(MTL_MCR_INSTANCEID, instance) |
				      (rw_flag == FW_REG_READ ? GEN11_MCR_MULTICAST : 0));
	} else if (GRAPHICS_VER(uncore->i915) >= 11) {
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
		mcr_ss = GEN11_MCR_SLICE(group) | GEN11_MCR_SUBSLICE(instance);

		/*
		 * Wa_22013088509
		 *
		 * The setting of the multicast/unicast bit usually wouldn't
		 * matter for read operations (which always return the value
		 * from a single register instance regardless of how that bit
		 * is set), but some platforms have a workaround requiring us
		 * to remain in multicast mode for reads.  There's no real
		 * downside to this, so we'll just go ahead and do so on all
		 * platforms; we'll only clear the multicast bit from the mask
		 * when explicitly doing a write operation.
		 */
		if (rw_flag == FW_REG_WRITE)
			mcr_mask |= GEN11_MCR_MULTICAST;

		mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
		old_mcr = mcr;

		mcr &= ~mcr_mask;
		mcr |= mcr_ss;
		intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
	} else {
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
		mcr_ss = GEN8_MCR_SLICE(group) | GEN8_MCR_SUBSLICE(instance);

		mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
		old_mcr = mcr;

		mcr &= ~mcr_mask;
		mcr |= mcr_ss;
		intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
	}

	if (rw_flag == FW_REG_READ)
		val = intel_uncore_read_fw(uncore, mcr_reg_cast(reg));
	else
		intel_uncore_write_fw(uncore, mcr_reg_cast(reg), value);

	/*
	 * For pre-MTL platforms, we need to restore the old value of the
	 * steering control register to ensure that implicit steering continues
	 * to behave as expected.  For MTL and beyond, we need only reinstate
	 * the 'multicast' bit (and only if we did a write that cleared it).
	 */
	if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70) && rw_flag == FW_REG_WRITE)
		intel_uncore_write_fw(uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);
	else if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 70))
		intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, old_mcr);

	return val;
}

static u32 rw_with_mcr_steering(struct intel_uncore *uncore,
				i915_mcr_reg_t reg, u8 rw_flag,
				int group, int instance,
				u32 value)
{
	enum forcewake_domains fw_domains;
	u32 val;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, mcr_reg_cast(reg),
						    rw_flag);
	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	val = rw_with_mcr_steering_fw(uncore, reg, rw_flag, group, instance, value);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irq(&uncore->lock);

	return val;
}

/**
 * intel_gt_mcr_read - read a specific instance of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to read
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Returns the value read from an MCR register after steering toward a specific
 * group/instance.
 */
u32 intel_gt_mcr_read(struct intel_gt *gt,
		      i915_mcr_reg_t reg,
		      int group, int instance)
{
	return rw_with_mcr_steering(gt->uncore, reg, FW_REG_READ, group, instance, 0);
}

/**
 * intel_gt_mcr_unicast_write - write a specific instance of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to write
 * @value: value to write
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Write an MCR register in unicast mode after steering toward a specific
 * group/instance.
 */
void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value,
				int group, int instance)
{
	rw_with_mcr_steering(gt->uncore, reg, FW_REG_WRITE, group, instance, value);
}

/**
 * intel_gt_mcr_multicast_write - write a value to all instances of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to write
 * @value: value to write
 *
 * Write an MCR register in multicast mode to update all instances.
 */
void intel_gt_mcr_multicast_write(struct intel_gt *gt,
				  i915_mcr_reg_t reg, u32 value)
{
	/*
	 * Ensure we have multicast behavior, just in case some non-i915 agent
	 * left the hardware in unicast mode.
	 */
	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
		intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);

	intel_uncore_write(gt->uncore, mcr_reg_cast(reg), value);
}

/**
 * intel_gt_mcr_multicast_write_fw - write a value to all instances of an MCR register
 * @gt: GT structure
 * @reg: the MCR register to write
 * @value: value to write
 *
 * Write an MCR register in multicast mode to update all instances.  This
 * function assumes the caller is already holding any necessary forcewake
 * domains; use intel_gt_mcr_multicast_write() in cases where forcewake should
 * be obtained automatically.
 */
void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value)
{
	/*
	 * Ensure we have multicast behavior, just in case some non-i915 agent
	 * left the hardware in unicast mode.
	 */
	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
		intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);

	intel_uncore_write_fw(gt->uncore, mcr_reg_cast(reg), value);
}

/**
 * intel_gt_mcr_multicast_rmw - perform a multicast RMW operation
 * @gt: GT structure
 * @reg: the MCR register to read and write
 * @clear: bits to clear during RMW
 * @set: bits to set during RMW
 *
 * Performs a read-modify-write on an MCR register in a multicast manner.
 * This operation only makes sense on MCR registers where all instances are
 * expected to have the same value.  The read will target any non-terminated
 * instance and the write will be applied to all instances.
 *
 * Forcewake is obtained automatically by the internal read and write
 * operations.
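 *
 * A hypothetical use (the register and field names are illustrative, not
 * real definitions)::
 *
 *	old = intel_gt_mcr_multicast_rmw(gt, MCR_EXAMPLE_REG,
 *					 EXAMPLE_FIELD_MASK, EXAMPLE_FIELD_VAL);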
 *
 * Returns the old (unmodified) value read.
 */
u32 intel_gt_mcr_multicast_rmw(struct intel_gt *gt, i915_mcr_reg_t reg,
			       u32 clear, u32 set)
{
	u32 val = intel_gt_mcr_read_any(gt, reg);

	intel_gt_mcr_multicast_write(gt, reg, (val & ~clear) | set);

	return val;
}

/*
 * reg_needs_read_steering - determine whether a register read requires
 *     explicit steering
 * @gt: GT structure
 * @reg: the register to check steering requirements for
 * @type: type of multicast steering to check
 *
 * Determines whether @reg needs explicit steering of a specific type for
 * reads.
 *
 * Returns false if @reg does not belong to a register range of the given
 * steering type, or if the default (subslice-based) steering IDs are suitable
 * for @type steering too.
 */
static bool reg_needs_read_steering(struct intel_gt *gt,
				    i915_mcr_reg_t reg,
				    enum intel_steering_type type)
{
	const u32 offset = i915_mmio_reg_offset(reg);
	const struct intel_mmio_range *entry;

	if (likely(!gt->steering_table[type]))
		return false;

	for (entry = gt->steering_table[type]; entry->end; entry++) {
		if (offset >= entry->start && offset <= entry->end)
			return true;
	}

	return false;
}

/*
 * get_nonterminated_steering - determines valid IDs for a class of MCR steering
 * @gt: GT structure
 * @type: multicast register type
 * @group: Group ID returned
 * @instance: Instance ID returned
 *
 * Determines group and instance values that will steer reads of the specified
 * MCR class to a non-terminated instance.
 */
static void get_nonterminated_steering(struct intel_gt *gt,
				       enum intel_steering_type type,
				       u8 *group, u8 *instance)
{
	u32 dss;

	switch (type) {
	case L3BANK:
		*group = 0;		/* unused */
		*instance = __ffs(gt->info.l3bank_mask);
		break;
	case MSLICE:
		GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
		*group = __ffs(gt->info.mslice_mask);
		*instance = 0;	/* unused */
		break;
	case LNCF:
		/*
		 * An LNCF is always present if its mslice is present, so we
		 * can safely just steer to LNCF 0 in all cases.
		 */
		GEM_WARN_ON(!HAS_MSLICE_STEERING(gt->i915));
		*group = __ffs(gt->info.mslice_mask) << 1;
		*instance = 0;	/* unused */
		break;
	case GAM:
		*group = IS_DG2(gt->i915) ? 1 : 0;
		*instance = 0;
		break;
	case DSS:
		dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
		*group = dss / GEN_DSS_PER_GSLICE;
		*instance = dss % GEN_DSS_PER_GSLICE;
		break;
	case INSTANCE0:
		/*
		 * There are a lot of MCR types for which instance (0, 0)
		 * will always provide a non-terminated value.
		 */
		*group = 0;
		*instance = 0;
		break;
	case OADDRM:
		if ((VDBOX_MASK(gt) | VEBOX_MASK(gt) | gt->info.sfc_mask) & BIT(0))
			*group = 0;
		else
			*group = 1;
		*instance = 0;
		break;
	default:
		MISSING_CASE(type);
		*group = 0;
		*instance = 0;
	}
}

/**
 * intel_gt_mcr_get_nonterminated_steering - find group/instance values that
 *    will steer a register to a non-terminated instance
 * @gt: GT structure
 * @reg: register for which the steering is required
 * @group: return variable for group steering
 * @instance: return variable for instance steering
 *
 * This function returns a group/instance pair that is guaranteed to work for
 * read steering of the given register. Note that a value will be returned even
 * if the register is not replicated and therefore does not actually require
 * steering.
 */
void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
					     i915_mcr_reg_t reg,
					     u8 *group, u8 *instance)
{
	int type;

	for (type = 0; type < NUM_STEERING_TYPES; type++) {
		if (reg_needs_read_steering(gt, reg, type)) {
			get_nonterminated_steering(gt, type, group, instance);
			return;
		}
	}

	*group = gt->default_steering.groupid;
	*instance = gt->default_steering.instanceid;
}

/**
 * intel_gt_mcr_read_any_fw - reads one instance of an MCR register
 * @gt: GT structure
 * @reg: register to read
 *
 * Reads a GT MCR register.  The read will be steered to a non-terminated
 * instance (i.e., one that isn't fused off or powered down by power gating).
 * This function assumes the caller is already holding any necessary forcewake
 * domains; use intel_gt_mcr_read_any() in cases where forcewake should be
 * obtained automatically.
 *
 * Returns the value from a non-terminated instance of @reg.
 */
u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg)
{
	int type;
	u8 group, instance;

	for (type = 0; type < NUM_STEERING_TYPES; type++) {
		if (reg_needs_read_steering(gt, reg, type)) {
			get_nonterminated_steering(gt, type, &group, &instance);
			return rw_with_mcr_steering_fw(gt->uncore, reg,
						       FW_REG_READ,
						       group, instance, 0);
		}
	}

	return intel_uncore_read_fw(gt->uncore, mcr_reg_cast(reg));
}

/**
 * intel_gt_mcr_read_any - reads one instance of an MCR register
 * @gt: GT structure
 * @reg: register to read
 *
 * Reads a GT MCR register.  The read will be steered to a non-terminated
 * instance (i.e., one that isn't fused off or powered down by power gating).
 *
 * Returns the value from a non-terminated instance of @reg.
 */
u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg)
{
	int type;
	u8 group, instance;

	for (type = 0; type < NUM_STEERING_TYPES; type++) {
		if (reg_needs_read_steering(gt, reg, type)) {
			get_nonterminated_steering(gt, type, &group, &instance);
			return rw_with_mcr_steering(gt->uncore, reg,
						    FW_REG_READ,
						    group, instance, 0);
		}
	}

	return intel_uncore_read(gt->uncore, mcr_reg_cast(reg));
}

static void report_steering_type(struct drm_printer *p,
				 struct intel_gt *gt,
				 enum intel_steering_type type,
				 bool dump_table)
{
	const struct intel_mmio_range *entry;
	u8 group, instance;

	BUILD_BUG_ON(ARRAY_SIZE(intel_steering_types) != NUM_STEERING_TYPES);

	if (!gt->steering_table[type]) {
		drm_printf(p, "%s steering: uses default steering\n",
			   intel_steering_types[type]);
		return;
	}

	get_nonterminated_steering(gt, type, &group, &instance);
	drm_printf(p, "%s steering: group=0x%x, instance=0x%x\n",
		   intel_steering_types[type], group, instance);

	if (!dump_table)
		return;

	for (entry = gt->steering_table[type]; entry->end; entry++)
		drm_printf(p, "\t0x%06x - 0x%06x\n", entry->start, entry->end);
}

void intel_gt_mcr_report_steering(struct drm_printer *p, struct intel_gt *gt,
				  bool dump_table)
{
	/*
	 * Starting with MTL we no longer have default steering;
	 * all ranges are explicitly steered.
	 */
	if (GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70))
		drm_printf(p, "Default steering: group=0x%x, instance=0x%x\n",
			   gt->default_steering.groupid,
			   gt->default_steering.instanceid);

	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70)) {
		for (int i = 0; i < NUM_STEERING_TYPES; i++)
			if (gt->steering_table[i])
				report_steering_type(p, gt, i, dump_table);
	} else if (IS_PONTEVECCHIO(gt->i915)) {
		report_steering_type(p, gt, INSTANCE0, dump_table);
	} else if (HAS_MSLICE_STEERING(gt->i915)) {
		report_steering_type(p, gt, MSLICE, dump_table);
		report_steering_type(p, gt, LNCF, dump_table);
	}
}

/**
 * intel_gt_mcr_get_ss_steering - returns the group/instance steering for a SS
 * @gt: GT structure
 * @dss: DSS ID to obtain steering for
 * @group: pointer to storage for steering group ID
 * @instance: pointer to storage for steering instance ID
 *
 * Returns the steering IDs (via the @group and @instance parameters) that
 * correspond to a specific subslice/DSS ID.
 */
void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
				  unsigned int *group, unsigned int *instance)
{
	if (IS_PONTEVECCHIO(gt->i915)) {
		*group = dss / GEN_DSS_PER_CSLICE;
		*instance = dss % GEN_DSS_PER_CSLICE;
	} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) {
		*group = dss / GEN_DSS_PER_GSLICE;
		*instance = dss % GEN_DSS_PER_GSLICE;
	} else {
		*group = dss / GEN_MAX_SS_PER_HSW_SLICE;
		*instance = dss % GEN_MAX_SS_PER_HSW_SLICE;
	}
}

/**
 * intel_gt_mcr_wait_for_reg_fw - wait until MCR register matches expected state
 * @gt: GT structure
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: value to wait for
 * @fast_timeout_us: fast timeout in microseconds, for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (intel_gt_mcr_read_any_fw(gt, reg) & mask) == value
 *
 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * This function is basically an MCR-friendly version of
 * __intel_wait_for_register_fw().  Generally this function will only be used
 * on GAM registers, which are a bit special: although they're MCR registers,
 * reads (e.g., waiting for status updates) are always directed to the primary
 * instance.
 *
 * Note that this routine assumes the caller holds forcewake asserted; it is
 * not suitable for very long waits.
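 *
 * A hypothetical call site (the register, mask, and value names are
 * illustrative, not real definitions) might poll for up to 100 microseconds
 * in atomic context::
 *
 *	err = intel_gt_mcr_wait_for_reg_fw(gt, MCR_EXAMPLE_REG,
 *					   EXAMPLE_STATUS_MASK,
 *					   EXAMPLE_STATUS_READY,
 *					   100, 0);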
 *
 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_gt_mcr_wait_for_reg_fw(struct intel_gt *gt,
				 i915_mcr_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms)
{
	u32 reg_value = 0;
#define done (((reg_value = intel_gt_mcr_read_any_fw(gt, reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);
	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	return ret;
#undef done
}