xref: /openbmc/linux/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c (revision f43e47c090dc7fe32d5410d8740c3a004eb2676f)
// SPDX-License-Identifier: MIT

/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/seq_file.h>
#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_pm.h"
#include "intel_gt_pm_debugfs.h"
#include "intel_gt_regs.h"
#include "intel_llc.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_runtime_pm.h"
#include "intel_uncore.h"
#include "vlv_sideband.h"

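/*
 * Helpers behind the "forcewake_user" debugfs file: while userspace holds
 * the file open we take a GT PM wakeref and, on graphics version 6+, grab
 * all forcewake domains so registers stay powered and accessible for
 * debugging. gt->user_wakeref counts how many such users are currently
 * keeping the device awake.
 */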
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (GRAPHICS_VER(gt->i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);
}

void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
{
	if (GRAPHICS_VER(gt->i915) >= 6)
		intel_uncore_forcewake_user_put(gt->uncore);
	intel_gt_pm_put(gt);
	atomic_dec(&gt->user_wakeref);
}

static int forcewake_user_open(struct inode *inode, struct file *file)
{
	struct intel_gt *gt = inode->i_private;

	intel_gt_pm_debugfs_forcewake_user_open(gt);

	return 0;
}

static int forcewake_user_release(struct inode *inode, struct file *file)
{
	struct intel_gt *gt = inode->i_private;

	intel_gt_pm_debugfs_forcewake_user_release(gt);

	return 0;
}

static const struct file_operations forcewake_user_fops = {
	.owner = THIS_MODULE,
	.open = forcewake_user_open,
	.release = forcewake_user_release,
};

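/*
 * "forcewake" debugfs file: dump the user forcewake bypass count and the
 * current wake_count of every forcewake domain on this uncore.
 */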
static int fw_domains_show(struct seq_file *m, void *data)
{
	struct intel_gt *gt = m->private;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   uncore->user_forcewake_count);

	for_each_fw_domain(fw_domain, uncore, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains);

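/*
 * Print a single RC6 residency counter, both as the raw register value and
 * converted to microseconds, taking a runtime PM wakeref around the read.
 */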
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct intel_gt *gt = m->private;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		seq_printf(m, "%s %u (%llu us)\n", title,
			   intel_uncore_read(gt->uncore, reg),
			   intel_rc6_residency_us(&gt->rc6, reg));
}

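/*
 * "drpc" output for Valleyview/Cherryview: RC6 enable state, render and
 * media power-well status from VLV_GTLC_PW_STATUS, and the RC6 residency
 * counters, followed by the forcewake domain summary.
 */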
static int vlv_drpc(struct seq_file *m)
{
	struct intel_gt *gt = m->private;
	struct intel_uncore *uncore = gt->uncore;
	u32 rcctl1, pw_status, mt_fwake_req;

	mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
	pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   str_yes_no(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return fw_domains_show(m, NULL);
}

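/*
 * "drpc" output for graphics version 6+ (other than VLV/CHV): RC state
 * machine status from GEN6_GT_CORE_STATUS, render/media power gating on
 * version 9+, the RC6/RC6+/RC6++ residencies, and the RC6 voltages
 * reported by pcode on versions 6 and 7.
 */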
static int gen6_drpc(struct seq_file *m)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 gt_core_status, mt_fwake_req, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
	gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);

	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
	if (GRAPHICS_VER(i915) >= 9) {
		gen9_powergate_enable =
			intel_uncore_read(uncore, GEN9_PG_ENABLE);
		gen9_powergate_status =
			intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (GRAPHICS_VER(i915) <= 7)
		snb_pcode_read(gt->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (GRAPHICS_VER(i915) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   str_yes_no(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   str_yes_no(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   str_yes_no(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
	if (GRAPHICS_VER(i915) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (GRAPHICS_VER(i915) <= 7) {
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return fw_domains_show(m, NULL);
}

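/*
 * "drpc" output for Ironlake: decode MEMMODECTL/RSTDBYCTL to report the
 * legacy render standby (RSx) configuration and current state.
 */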
static int ilk_drpc(struct seq_file *m)
{
	struct intel_gt *gt = m->private;
	struct intel_uncore *uncore = gt->uncore;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

	seq_printf(m, "HD boost: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   str_yes_no(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RS3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

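/*
 * "drpc" debugfs file: grab a runtime PM wakeref and dispatch to the
 * platform-specific RC state dump above.
 */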
static int drpc_show(struct seq_file *m, void *unused)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			err = vlv_drpc(m);
		else if (GRAPHICS_VER(i915) >= 6)
			err = gen6_drpc(m);
		else
			err = ilk_drpc(m);
	}

	return err;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(drpc);

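/*
 * Dump the current frequency/P-state information for this GT. Exported so
 * callers outside this file can reuse it with their own drm_printer; the
 * "frequency" debugfs file below wraps it with a seq_file printer.
 */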
void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_rps *rps = &gt->rps;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(uncore->rpm);

	if (GRAPHICS_VER(i915) == 5) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		drm_printf(p, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		drm_printf(p, "Requested VID: %d\n", rgvswctl & 0x3f);
		drm_printf(p, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		drm_printf(p, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
		drm_printf(p, "Video Turbo Mode: %s\n",
			   str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
		drm_printf(p, "HW control enabled: %s\n",
			   str_yes_no(rpmodectl & GEN6_RP_ENABLE));
		drm_printf(p, "SW control enabled: %s\n",
			   str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(i915);
		freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);

		drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		drm_printf(p, "DDR freq: %d MHz\n", i915->mem_freq);

		drm_printf(p, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		drm_printf(p, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		drm_printf(p, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		drm_printf(p, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		drm_printf(p, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		drm_printf(p, "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (GRAPHICS_VER(i915) >= 6) {
		gen6_rps_frequency_dump(rps, p);
	} else {
		drm_puts(p, "no P-state info available\n");
	}

	drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
	drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
	drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);

	intel_runtime_pm_put(uncore->rpm, wakeref);
}

static int frequency_show(struct seq_file *m, void *unused)
{
	struct intel_gt *gt = m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	intel_gt_pm_frequency_dump(gt, &p);

	return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(frequency);

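/*
 * "llc" debugfs file: report LLC/eDRAM availability and size, then dump the
 * pcode minimum ring/IA frequency table for every GPU frequency between the
 * RPS min and max. Only registered when the platform has an LLC (see
 * llc_eval below).
 */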
static int llc_show(struct seq_file *m, void *data)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	const bool edram = GRAPHICS_VER(i915) > 8;
	struct intel_rps *rps = &gt->rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	seq_printf(m, "LLC: %s\n", str_yes_no(HAS_LLC(i915)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   i915->edram_size_mb);

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		snb_pcode_read(gt->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
			       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(rps,
					  (gpu_freq *
					   (IS_GEN9_BC(i915) ||
					    GRAPHICS_VER(i915) >= 11 ?
					    GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);

	return 0;
}

static bool llc_eval(void *data)
{
	struct intel_gt *gt = data;

	return HAS_LLC(gt->i915);
}

DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(llc);

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}

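/*
 * "rps_boost" debugfs file: RPS enable/active state, outstanding waitboosts,
 * the current frequency limits, and, while RPS is active, the up/down
 * evaluation-interval counters used by the autotuning state machine.
 */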
static int rps_boost_show(struct seq_file *m, void *data)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_rps *rps = &gt->rps;

	seq_printf(m, "RPS enabled? %s\n",
		   str_yes_no(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n",
		   str_yes_no(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s, %llums\n",
		   str_yes_no(gt->awake),
		   ktime_to_ms(intel_gt_get_awake_time(gt)));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));

	if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) {
		struct intel_uncore *uncore = gt->uncore;
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static bool rps_eval(void *data)
{
	struct intel_gt *gt = data;

	return HAS_RPS(gt->i915);
}

DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rps_boost);

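/*
 * "perf_limit_reasons" debugfs file: reading returns the raw
 * PERF_LIMIT_REASONS register; writing any value clears the sticky upper
 * "log" bits while leaving the read-only "status" bits untouched.
 */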
static int perf_limit_reasons_get(void *data, u64 *val)
{
	struct intel_gt *gt = data;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		*val = intel_uncore_read(gt->uncore, intel_gt_perf_limit_reasons_reg(gt));

	return 0;
}

static int perf_limit_reasons_clear(void *data, u64 val)
{
	struct intel_gt *gt = data;
	intel_wakeref_t wakeref;

	/*
	 * Clear the upper 16 "log" bits, the lower 16 "status" bits are
	 * read-only. The upper 16 "log" bits are identical to the lower 16
	 * "status" bits except that the "log" bits remain set until cleared.
	 */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		intel_uncore_rmw(gt->uncore, intel_gt_perf_limit_reasons_reg(gt),
				 GT0_PERF_LIMIT_REASONS_LOG_MASK, 0);

	return 0;
}

static bool perf_limit_reasons_eval(void *data)
{
	struct intel_gt *gt = data;

	return i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt));
}

DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get,
			perf_limit_reasons_clear, "%llu\n");

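/*
 * Register all of the files above under the per-GT debugfs directory. Files
 * with an eval callback are only created when that callback reports the
 * relevant feature is present on this GT.
 */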
void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
	static const struct intel_gt_debugfs_file files[] = {
		{ "drpc", &drpc_fops, NULL },
		{ "frequency", &frequency_fops, NULL },
		{ "forcewake", &fw_domains_fops, NULL },
		{ "forcewake_user", &forcewake_user_fops, NULL},
		{ "llc", &llc_fops, llc_eval },
		{ "rps_boost", &rps_boost_fops, rps_eval },
		{ "perf_limit_reasons", &perf_limit_reasons_fops, perf_limit_reasons_eval },
	};

	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}