// SPDX-License-Identifier: MIT

/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/seq_file.h>
#include <linux/string_helpers.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_pm.h"
#include "intel_gt_pm_debugfs.h"
#include "intel_gt_regs.h"
#include "intel_llc.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_runtime_pm.h"
#include "intel_uncore.h"
#include "vlv_sideband.h"

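/*
 * Grab a GT PM wakeref on behalf of userspace and, on gen6+ hardware, pin
 * all forcewake domains until the matching release call below. The
 * user_wakeref counter tracks how many such userspace references are
 * currently outstanding.
 */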
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (GRAPHICS_VER(gt->i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);
}

void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
{
	if (GRAPHICS_VER(gt->i915) >= 6)
		intel_uncore_forcewake_user_put(gt->uncore);
	intel_gt_pm_put(gt);
	atomic_dec(&gt->user_wakeref);
}

static int forcewake_user_open(struct inode *inode, struct file *file)
{
	struct intel_gt *gt = inode->i_private;

	intel_gt_pm_debugfs_forcewake_user_open(gt);

	return 0;
}

static int forcewake_user_release(struct inode *inode, struct file *file)
{
	struct intel_gt *gt = inode->i_private;

	intel_gt_pm_debugfs_forcewake_user_release(gt);

	return 0;
}

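/*
 * The "forcewake_user" debugfs file: while userspace holds the file open,
 * the GT and all forcewake domains are kept awake; closing the file drops
 * the references again. A usage sketch (the debugfs path may vary by card
 * and kernel version):
 *
 *   exec 3<> /sys/kernel/debug/dri/0/gt/forcewake_user   # hold GT awake
 *   ... inspect registers ...
 *   exec 3<&-                                            # release
 */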
static const struct file_operations forcewake_user_fops = {
	.owner = THIS_MODULE,
	.open = forcewake_user_open,
	.release = forcewake_user_release,
};

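/* Dump the user bypass count and the wake count of each forcewake domain. */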
static int fw_domains_show(struct seq_file *m, void *data)
{
	struct intel_gt *gt = m->private;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   uncore->user_forcewake_count);

	for_each_fw_domain(fw_domain, uncore, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains);

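/*
 * Print a single RC6 residency counter: the raw register value and its
 * conversion to microseconds, reading the register under a runtime PM
 * wakeref.
 */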
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct intel_gt *gt = m->private;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		seq_printf(m, "%s %u (%llu us)\n", title,
			   intel_uncore_read(gt->uncore, reg),
			   intel_rc6_residency_us(&gt->rc6, reg));
}

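/* drpc on Valleyview/Cherryview: RC6 enabling and power well status. */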
static int vlv_drpc(struct seq_file *m)
{
	struct intel_gt *gt = m->private;
	struct intel_uncore *uncore = gt->uncore;
	u32 rcctl1, pw_status;

	pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   str_yes_no(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return fw_domains_show(m, NULL);
}

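/*
 * drpc on gen6+ (other than VLV/CHV): RC6 enabling bits, gen9 power
 * gating, the current RC state, residency counters and, on gen6/7, the
 * RC6 voltages reported by pcode.
 */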
static int gen6_drpc(struct seq_file *m)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);

	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
	if (GRAPHICS_VER(i915) >= 9) {
		gen9_powergate_enable =
			intel_uncore_read(uncore, GEN9_PG_ENABLE);
		gen9_powergate_status =
			intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (GRAPHICS_VER(i915) <= 7)
		snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (GRAPHICS_VER(i915) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   str_yes_no(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   str_yes_no(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   str_yes_no(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (GRAPHICS_VER(i915) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (GRAPHICS_VER(i915) <= 7) {
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return fw_domains_show(m, NULL);
}

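/* drpc on Ironlake: MEMMODE frequency control and render standby (RSx) state. */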
static int ilk_drpc(struct seq_file *m)
{
	struct intel_gt *gt = m->private;
	struct intel_uncore *uncore = gt->uncore;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

	seq_printf(m, "HD boost: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   str_yes_no(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   str_yes_no(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

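/* Dispatch to the platform specific drpc dump under a runtime PM wakeref. */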
static int drpc_show(struct seq_file *m, void *unused)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			err = vlv_drpc(m);
		else if (GRAPHICS_VER(i915) >= 6)
			err = gen6_drpc(m);
		else
			err = ilk_drpc(m);
	}

	return err;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(drpc);

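/**
 * intel_gt_pm_frequency_dump - dump the GT frequency/RPS state
 * @gt: the GT to report on
 * @p: the drm_printer receiving the output
 *
 * Print the platform specific frequency state: the ILK MEMSWCTL/MEMSTAT
 * P-state, the VLV/CHV punit frequency status, or the full gen6+ RPS
 * picture (RP control, PM interrupt registers, up/down evaluation
 * intervals and thresholds, frequency caps and the current, actual, idle,
 * boost and efficient frequencies), followed by the CD clock and maximum
 * dot clock frequencies.
 */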
void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_rps *rps = &gt->rps;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(uncore->rpm);

	if (GRAPHICS_VER(i915) == 5) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		drm_printf(p, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		drm_printf(p, "Requested VID: %d\n", rgvswctl & 0x3f);
		drm_printf(p, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		drm_printf(p, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
		drm_printf(p, "Video Turbo Mode: %s\n",
			   str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
		drm_printf(p, "HW control enabled: %s\n",
			   str_yes_no(rpmodectl & GEN6_RP_ENABLE));
		drm_printf(p, "SW control enabled: %s\n",
			   str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(i915);
		freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);

		drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		drm_printf(p, "DDR freq: %d MHz\n", i915->mem_freq);

		drm_printf(p, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		drm_printf(p, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		drm_printf(p, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		drm_printf(p, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		drm_printf(p, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		drm_printf(p, "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (GRAPHICS_VER(i915) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		struct intel_rps_freq_caps caps;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpcurupei, rpcurup, rpprevup;
		u32 rpcurdownei, rpcurdown, rpprevdown;
		u32 rpupei, rpupt, rpdownei, rpdownt;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;

		rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
		gen6_rps_get_freq_caps(rps, &caps);
		if (IS_GEN9_LP(i915))
			gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
		else
			gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

		reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
		if (GRAPHICS_VER(i915) >= 9) {
			reqf >>= 23;
		} else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(i915) || IS_BROADWELL(i915))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

		rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
		rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
		rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

		rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
		rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;

		rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI);
		rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);

		rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI);
		rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

		cagf = intel_rps_read_actual_frequency(rps);

		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

		if (GRAPHICS_VER(i915) >= 11) {
			pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (GRAPHICS_VER(i915) >= 8) {
			pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
			pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
			pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
			pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
		} else {
			pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
			pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
			pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
			pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
		}
		pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);

		drm_printf(p, "Video Turbo Mode: %s\n",
			   str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
		drm_printf(p, "HW control enabled: %s\n",
			   str_yes_no(rpmodectl & GEN6_RP_ENABLE));
		drm_printf(p, "SW control enabled: %s\n",
			   str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));

		drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (GRAPHICS_VER(i915) <= 10)
			drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		drm_printf(p, "Render p-state ratio: %d\n",
			   (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		drm_printf(p, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		drm_printf(p, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat);
		drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl);
		drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		drm_printf(p, "RPNSWREQ: %dMHz\n", reqf);
		drm_printf(p, "CAGF: %dMHz\n", cagf);
		drm_printf(p, "RP CUR UP EI: %d (%lldns)\n",
			   rpcurupei,
			   intel_gt_pm_interval_to_ns(gt, rpcurupei));
		drm_printf(p, "RP CUR UP: %d (%lldns)\n",
			   rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
		drm_printf(p, "RP PREV UP: %d (%lldns)\n",
			   rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
		drm_printf(p, "Up threshold: %d%%\n",
			   rps->power.up_threshold);
		drm_printf(p, "RP UP EI: %d (%lldns)\n",
			   rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
		drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n",
			   rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));

		drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n",
			   rpcurdownei,
			   intel_gt_pm_interval_to_ns(gt, rpcurdownei));
		drm_printf(p, "RP CUR DOWN: %d (%lldns)\n",
			   rpcurdown,
			   intel_gt_pm_interval_to_ns(gt, rpcurdown));
		drm_printf(p, "RP PREV DOWN: %d (%lldns)\n",
			   rpprevdown,
			   intel_gt_pm_interval_to_ns(gt, rpprevdown));
		drm_printf(p, "Down threshold: %d%%\n",
			   rps->power.down_threshold);
		drm_printf(p, "RP DOWN EI: %d (%lldns)\n",
			   rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
		drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n",
			   rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));

		drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, caps.min_freq));
		drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, caps.rp1_freq));
		drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, caps.rp0_freq));
		drm_printf(p, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		drm_printf(p, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		drm_printf(p, "Actual freq: %d MHz\n", cagf);
		drm_printf(p, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		drm_printf(p, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		drm_printf(p, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		drm_printf(p, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		drm_printf(p,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		drm_puts(p, "no P-state info available\n");
	}

	drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
	drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
	drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);

	intel_runtime_pm_put(uncore->rpm, wakeref);
}

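/* The "frequency" debugfs file: intel_gt_pm_frequency_dump() via seq_file. */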
static int frequency_show(struct seq_file *m, void *unused)
{
	struct intel_gt *gt = m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	intel_gt_pm_frequency_dump(gt, &p);

	return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(frequency);

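/*
 * The "llc" debugfs file: report LLC/eDRAM availability, then walk the GPU
 * frequency range and print the effective CPU and ring frequencies that
 * pcode pairs with each GPU frequency.
 */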
static int llc_show(struct seq_file *m, void *data)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	const bool edram = GRAPHICS_VER(i915) > 8;
	struct intel_rps *rps = &gt->rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	seq_printf(m, "LLC: %s\n", str_yes_no(HAS_LLC(i915)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   i915->edram_size_mb);

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE,
			       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(rps,
					  (gpu_freq *
					   (IS_GEN9_BC(i915) ||
					    GRAPHICS_VER(i915) >= 11 ?
					    GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);

	return 0;
}

static bool llc_eval(void *data)
{
	struct intel_gt *gt = data;

	return HAS_LLC(gt->i915);
}

DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(llc);

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}

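/*
 * The "rps_boost" debugfs file: RPS enable/active state, outstanding
 * waitboosts, the frequency limits and, while RPS is active, the up/down
 * busyness averages used for autotuning.
 */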
static int rps_boost_show(struct seq_file *m, void *data)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_rps *rps = &gt->rps;

	seq_printf(m, "RPS enabled? %s\n",
		   str_yes_no(intel_rps_is_enabled(rps)));
	seq_printf(m, "RPS active? %s\n",
		   str_yes_no(intel_rps_is_active(rps)));
	seq_printf(m, "GPU busy? %s, %llums\n",
		   str_yes_no(gt->awake),
		   ktime_to_ms(intel_gt_get_awake_time(gt)));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));

	if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) {
		struct intel_uncore *uncore = gt->uncore;
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static bool rps_eval(void *data)
{
	struct intel_gt *gt = data;

	return HAS_RPS(gt->i915);
}

DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rps_boost);

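/**
 * intel_gt_pm_debugfs_register - register the GT power management debugfs files
 * @gt: the GT to expose
 * @root: the debugfs directory (typically .../dri/<minor>/gt) to populate
 *
 * Creates the drpc, frequency, forcewake, forcewake_user, llc and rps_boost
 * entries; llc and rps_boost are only created on platforms that support them.
 */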
void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
	static const struct intel_gt_debugfs_file files[] = {
		{ "drpc", &drpc_fops, NULL },
		{ "frequency", &frequency_fops, NULL },
		{ "forcewake", &fw_domains_fops, NULL },
		{ "forcewake_user", &forcewake_user_fops, NULL },
		{ "llc", &llc_fops, llc_eval },
		{ "rps_boost", &rps_boost_fops, rps_eval },
	};

	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}