1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_display_types.h"
14 #include "intel_dp.h"
15 #include "intel_fbc.h"
16 #include "intel_hdcp.h"
17 #include "intel_hdmi.h"
18 #include "intel_pm.h"
19 #include "intel_psr.h"
20 #include "intel_sideband.h"
21 
22 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
23 {
24 	return to_i915(node->minor->dev);
25 }
26 
27 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
28 {
29 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
30 
31 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
32 		   dev_priv->fb_tracking.busy_bits);
33 
34 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
35 		   dev_priv->fb_tracking.flip_bits);
36 
37 	return 0;
38 }
39 
40 static int i915_fbc_status(struct seq_file *m, void *unused)
41 {
42 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 	struct intel_fbc *fbc = &dev_priv->fbc;
44 	intel_wakeref_t wakeref;
45 
46 	if (!HAS_FBC(dev_priv))
47 		return -ENODEV;
48 
49 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
50 	mutex_lock(&fbc->lock);
51 
52 	if (intel_fbc_is_active(dev_priv))
53 		seq_puts(m, "FBC enabled\n");
54 	else
55 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
56 
57 	if (intel_fbc_is_active(dev_priv)) {
58 		u32 mask;
59 
60 		if (INTEL_GEN(dev_priv) >= 8)
61 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
62 		else if (INTEL_GEN(dev_priv) >= 7)
63 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
64 		else if (INTEL_GEN(dev_priv) >= 5)
65 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
66 		else if (IS_G4X(dev_priv))
67 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
68 		else
69 			mask = intel_de_read(dev_priv, FBC_STATUS) &
70 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
71 
72 		seq_printf(m, "Compressing: %s\n", yesno(mask));
73 	}
74 
75 	mutex_unlock(&fbc->lock);
76 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
77 
78 	return 0;
79 }
80 
81 static int i915_fbc_false_color_get(void *data, u64 *val)
82 {
83 	struct drm_i915_private *dev_priv = data;
84 
85 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
86 		return -ENODEV;
87 
88 	*val = dev_priv->fbc.false_color;
89 
90 	return 0;
91 }
92 
93 static int i915_fbc_false_color_set(void *data, u64 val)
94 {
95 	struct drm_i915_private *dev_priv = data;
96 	u32 reg;
97 
98 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
99 		return -ENODEV;
100 
101 	mutex_lock(&dev_priv->fbc.lock);
102 
103 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
104 	dev_priv->fbc.false_color = val;
105 
106 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
107 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
108 
109 	mutex_unlock(&dev_priv->fbc.lock);
110 	return 0;
111 }
112 
113 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
114 			i915_fbc_false_color_get, i915_fbc_false_color_set,
115 			"%llu\n");
116 
117 static int i915_ips_status(struct seq_file *m, void *unused)
118 {
119 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
120 	intel_wakeref_t wakeref;
121 
122 	if (!HAS_IPS(dev_priv))
123 		return -ENODEV;
124 
125 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
126 
127 	seq_printf(m, "Enabled by kernel parameter: %s\n",
128 		   yesno(dev_priv->params.enable_ips));
129 
130 	if (INTEL_GEN(dev_priv) >= 8) {
131 		seq_puts(m, "Currently: unknown\n");
132 	} else {
133 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
134 			seq_puts(m, "Currently: enabled\n");
135 		else
136 			seq_puts(m, "Currently: disabled\n");
137 	}
138 
139 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
140 
141 	return 0;
142 }
143 
/*
 * Report whether panel self-refresh is enabled.
 *
 * The self-refresh enable bit lives in a different register on nearly
 * every platform generation, so the chain below runs newest-to-oldest;
 * the ordering is load-bearing (e.g. the PCH-split check must come
 * before the individual pre-PCH platform checks).
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
172 
173 static int i915_opregion(struct seq_file *m, void *unused)
174 {
175 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
176 
177 	if (opregion->header)
178 		seq_write(m, opregion->header, OPREGION_SIZE);
179 
180 	return 0;
181 }
182 
183 static int i915_vbt(struct seq_file *m, void *unused)
184 {
185 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
186 
187 	if (opregion->vbt)
188 		seq_write(m, opregion->vbt, opregion->vbt_size);
189 
190 	return 0;
191 }
192 
193 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
194 {
195 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
196 	struct drm_device *dev = &dev_priv->drm;
197 	struct intel_framebuffer *fbdev_fb = NULL;
198 	struct drm_framebuffer *drm_fb;
199 
200 #ifdef CONFIG_DRM_FBDEV_EMULATION
201 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
202 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
203 
204 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
205 			   fbdev_fb->base.width,
206 			   fbdev_fb->base.height,
207 			   fbdev_fb->base.format->depth,
208 			   fbdev_fb->base.format->cpp[0] * 8,
209 			   fbdev_fb->base.modifier,
210 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
211 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
212 		seq_putc(m, '\n');
213 	}
214 #endif
215 
216 	mutex_lock(&dev->mode_config.fb_lock);
217 	drm_for_each_fb(drm_fb, dev) {
218 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
219 		if (fb == fbdev_fb)
220 			continue;
221 
222 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
223 			   fb->base.width,
224 			   fb->base.height,
225 			   fb->base.format->depth,
226 			   fb->base.format->cpp[0] * 8,
227 			   fb->base.modifier,
228 			   drm_framebuffer_read_refcount(&fb->base));
229 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
230 		seq_putc(m, '\n');
231 	}
232 	mutex_unlock(&dev->mode_config.fb_lock);
233 
234 	return 0;
235 }
236 
237 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
238 {
239 	u8 val;
240 	static const char * const sink_status[] = {
241 		"inactive",
242 		"transition to active, capture and display",
243 		"active, display from RFB",
244 		"active, capture and display on sink device timings",
245 		"transition to inactive, capture and display, timing re-sync",
246 		"reserved",
247 		"reserved",
248 		"sink internal error",
249 	};
250 	struct drm_connector *connector = m->private;
251 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
252 	struct intel_dp *intel_dp =
253 		intel_attached_dp(to_intel_connector(connector));
254 	int ret;
255 
256 	if (!CAN_PSR(dev_priv)) {
257 		seq_puts(m, "PSR Unsupported\n");
258 		return -ENODEV;
259 	}
260 
261 	if (connector->status != connector_status_connected)
262 		return -ENODEV;
263 
264 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
265 
266 	if (ret == 1) {
267 		const char *str = "unknown";
268 
269 		val &= DP_PSR_SINK_STATE_MASK;
270 		if (val < ARRAY_SIZE(sink_status))
271 			str = sink_status[val];
272 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
273 	} else {
274 		return ret;
275 	}
276 
277 	return 0;
278 }
279 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
280 
/*
 * psr_source_status - decode and print the source (hardware) PSR state
 * machine state for the transcoder currently driving PSR.
 *
 * PSR1 and PSR2 use different status registers with different state
 * encodings, hence the two lookup tables below.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = intel_de_read(dev_priv,
				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = intel_de_read(dev_priv,
				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	/* Values outside the table print as "unknown" plus the raw register. */
	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
328 
/*
 * i915_edp_psr_status - dump the full PSR state: sink capability, current
 * mode (PSR1/PSR2), source control and status registers, frontbuffer busy
 * bits, and optional debug/residency counters.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	/* Nothing further to report without a PSR-capable sink. */
	if (!psr->sink_support)
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   yesno(psr->sink_not_reliable));

		goto unlock;
	}

	/* PSR1 and PSR2 have separate control registers. */
	if (psr->psr2_enabled) {
		val = intel_de_read(dev_priv,
				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(dev_priv->psr.transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered,
	 * so only report it on HSW/BDW where it stays meaningful.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = intel_de_read(dev_priv,
				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
		val &= EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
			val = intel_de_read(dev_priv,
					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
			su_frames_val[frame / 3] = val;
		}

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		/* Each register packs the SU block counts of 3 frames. */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}

		seq_printf(m, "PSR2 selective fetch: %s\n",
			   enableddisabled(psr->psr2_sel_fetch_enabled));
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
431 
432 static int
433 i915_edp_psr_debug_set(void *data, u64 val)
434 {
435 	struct drm_i915_private *dev_priv = data;
436 	intel_wakeref_t wakeref;
437 	int ret;
438 
439 	if (!CAN_PSR(dev_priv))
440 		return -ENODEV;
441 
442 	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
443 
444 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
445 
446 	ret = intel_psr_debug_set(dev_priv, val);
447 
448 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
449 
450 	return ret;
451 }
452 
453 static int
454 i915_edp_psr_debug_get(void *data, u64 *val)
455 {
456 	struct drm_i915_private *dev_priv = data;
457 
458 	if (!CAN_PSR(dev_priv))
459 		return -ENODEV;
460 
461 	*val = READ_ONCE(dev_priv->psr.debug);
462 	return 0;
463 }
464 
465 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
466 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
467 			"%llu\n");
468 
469 static int i915_power_domain_info(struct seq_file *m, void *unused)
470 {
471 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
472 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
473 	int i;
474 
475 	mutex_lock(&power_domains->lock);
476 
477 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
478 	for (i = 0; i < power_domains->power_well_count; i++) {
479 		struct i915_power_well *power_well;
480 		enum intel_display_power_domain power_domain;
481 
482 		power_well = &power_domains->power_wells[i];
483 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
484 			   power_well->count);
485 
486 		for_each_power_domain(power_domain, power_well->desc->domains)
487 			seq_printf(m, "  %-23s %d\n",
488 				 intel_display_power_domain_str(power_domain),
489 				 power_domains->domain_use_count[power_domain]);
490 	}
491 
492 	mutex_unlock(&power_domains->lock);
493 
494 	return 0;
495 }
496 
/*
 * i915_dmc_info - report DMC (CSR) firmware load state, version, and the
 * DC-state residency counters, whose registers vary per platform.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;
	i915_reg_t dc5_reg, dc6_reg = {}; /* dc6_reg stays 0 where absent */

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without firmware, only the raw program/base registers below apply. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (INTEL_GEN(dev_priv) >= 12) {
		if (IS_DGFX(dev_priv)) {
			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
		} else {
			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		}

		/*
		 * NOTE: DMC_DEBUG3 is a general purpose reg.
		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
		 * reg for DC3CO debugging and validation,
		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
		 */
		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(dev_priv, DMC_DEBUG3));
	} else {
		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						 SKL_CSR_DC3_DC5_COUNT;
		if (!IS_GEN9_LP(dev_priv))
			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   intel_de_read(dev_priv, dc5_reg));
	/* Only print DC6 where a counter register exists. */
	if (dc6_reg.reg)
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(dev_priv, dc6_reg));

out:
	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(dev_priv, CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
560 
561 static void intel_seq_print_mode(struct seq_file *m, int tabs,
562 				 const struct drm_display_mode *mode)
563 {
564 	int i;
565 
566 	for (i = 0; i < tabs; i++)
567 		seq_putc(m, '\t');
568 
569 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
570 }
571 
572 static void intel_encoder_info(struct seq_file *m,
573 			       struct intel_crtc *crtc,
574 			       struct intel_encoder *encoder)
575 {
576 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
577 	struct drm_connector_list_iter conn_iter;
578 	struct drm_connector *connector;
579 
580 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
581 		   encoder->base.base.id, encoder->base.name);
582 
583 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
584 	drm_for_each_connector_iter(connector, &conn_iter) {
585 		const struct drm_connector_state *conn_state =
586 			connector->state;
587 
588 		if (conn_state->best_encoder != &encoder->base)
589 			continue;
590 
591 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
592 			   connector->base.id, connector->name);
593 	}
594 	drm_connector_list_iter_end(&conn_iter);
595 }
596 
597 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
598 {
599 	const struct drm_display_mode *mode = panel->fixed_mode;
600 
601 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
602 }
603 
604 static void intel_hdcp_info(struct seq_file *m,
605 			    struct intel_connector *intel_connector)
606 {
607 	bool hdcp_cap, hdcp2_cap;
608 
609 	if (!intel_connector->hdcp.shim) {
610 		seq_puts(m, "No Connector Support");
611 		goto out;
612 	}
613 
614 	hdcp_cap = intel_hdcp_capable(intel_connector);
615 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
616 
617 	if (hdcp_cap)
618 		seq_puts(m, "HDCP1.4 ");
619 	if (hdcp2_cap)
620 		seq_puts(m, "HDCP2.2 ");
621 
622 	if (!hdcp_cap && !hdcp2_cap)
623 		seq_puts(m, "None");
624 
625 out:
626 	seq_puts(m, "\n");
627 }
628 
629 static void intel_dp_info(struct seq_file *m,
630 			  struct intel_connector *intel_connector)
631 {
632 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
633 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
634 	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
635 
636 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
637 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
638 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
639 		intel_panel_info(m, &intel_connector->panel);
640 
641 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
642 				edid ? edid->data : NULL, &intel_dp->aux);
643 }
644 
645 static void intel_dp_mst_info(struct seq_file *m,
646 			      struct intel_connector *intel_connector)
647 {
648 	bool has_audio = intel_connector->port->has_audio;
649 
650 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
651 }
652 
653 static void intel_hdmi_info(struct seq_file *m,
654 			    struct intel_connector *intel_connector)
655 {
656 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
657 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
658 
659 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
660 }
661 
662 static void intel_lvds_info(struct seq_file *m,
663 			    struct intel_connector *intel_connector)
664 {
665 	intel_panel_info(m, &intel_connector->panel);
666 }
667 
668 static void intel_connector_info(struct seq_file *m,
669 				 struct drm_connector *connector)
670 {
671 	struct intel_connector *intel_connector = to_intel_connector(connector);
672 	const struct drm_connector_state *conn_state = connector->state;
673 	struct intel_encoder *encoder =
674 		to_intel_encoder(conn_state->best_encoder);
675 	const struct drm_display_mode *mode;
676 
677 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
678 		   connector->base.id, connector->name,
679 		   drm_get_connector_status_name(connector->status));
680 
681 	if (connector->status == connector_status_disconnected)
682 		return;
683 
684 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
685 		   connector->display_info.width_mm,
686 		   connector->display_info.height_mm);
687 	seq_printf(m, "\tsubpixel order: %s\n",
688 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
689 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
690 
691 	if (!encoder)
692 		return;
693 
694 	switch (connector->connector_type) {
695 	case DRM_MODE_CONNECTOR_DisplayPort:
696 	case DRM_MODE_CONNECTOR_eDP:
697 		if (encoder->type == INTEL_OUTPUT_DP_MST)
698 			intel_dp_mst_info(m, intel_connector);
699 		else
700 			intel_dp_info(m, intel_connector);
701 		break;
702 	case DRM_MODE_CONNECTOR_LVDS:
703 		if (encoder->type == INTEL_OUTPUT_LVDS)
704 			intel_lvds_info(m, intel_connector);
705 		break;
706 	case DRM_MODE_CONNECTOR_HDMIA:
707 		if (encoder->type == INTEL_OUTPUT_HDMI ||
708 		    encoder->type == INTEL_OUTPUT_DDI)
709 			intel_hdmi_info(m, intel_connector);
710 		break;
711 	default:
712 		break;
713 	}
714 
715 	seq_puts(m, "\tHDCP version: ");
716 	intel_hdcp_info(m, intel_connector);
717 
718 	seq_printf(m, "\tmodes:\n");
719 	list_for_each_entry(mode, &connector->modes, head)
720 		intel_seq_print_mode(m, 2, mode);
721 }
722 
/*
 * plane_type - short three-letter tag for a DRM plane type, used by the
 * per-plane listing below.
 */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
740 
/*
 * plane_rotation - format a DRM rotation/reflection bitmask into @buf as
 * human-readable flags followed by the raw hex value.
 */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
757 
758 static const char *plane_visibility(const struct intel_plane_state *plane_state)
759 {
760 	if (plane_state->uapi.visible)
761 		return "visible";
762 
763 	if (plane_state->planar_slave)
764 		return "planar-slave";
765 
766 	return "hidden";
767 }
768 
/*
 * intel_plane_uapi_info - print the uapi (userspace-facing) state of a
 * plane: fb id/format/size, visibility, src/dst rectangles, rotation and
 * any planar YUV master/slave linkage.
 */
static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
{
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	const struct drm_framebuffer *fb = plane_state->uapi.fb;
	struct drm_format_name_buf format_name;
	struct drm_rect src, dst;
	char rot_str[48];

	src = drm_plane_state_src(&plane_state->uapi);
	dst = drm_plane_state_dest(&plane_state->uapi);

	/* format_name is only read below when fb is non-NULL. */
	if (fb)
		drm_get_format_name(fb->format->format, &format_name);

	plane_rotation(rot_str, sizeof(rot_str),
		       plane_state->uapi.rotation);

	seq_printf(m, "\t\tuapi: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
		   fb ? fb->modifier : 0,
		   fb ? fb->width : 0, fb ? fb->height : 0,
		   plane_visibility(plane_state),
		   DRM_RECT_FP_ARG(&src),
		   DRM_RECT_ARG(&dst),
		   rot_str);

	if (plane_state->planar_linked_plane)
		seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
			   plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
			   plane_state->planar_slave ? "slave" : "master");
}
801 
/*
 * intel_plane_hw_info - print the hw (committed) state of a plane;
 * prints nothing for planes with no hw fb.
 *
 * NOTE(review): visible/src/dst below are taken from the uapi state even
 * though this is the "hw" line — no separate hw rects are referenced
 * here; confirm this is intentional.
 */
static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
{
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	struct drm_format_name_buf format_name;
	char rot_str[48];

	if (!fb)
		return;

	drm_get_format_name(fb->format->format, &format_name);

	plane_rotation(rot_str, sizeof(rot_str),
		       plane_state->hw.rotation);

	seq_printf(m, "\t\thw: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
		   fb->base.id, format_name.str,
		   fb->modifier, fb->width, fb->height,
		   yesno(plane_state->uapi.visible),
		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
		   DRM_RECT_ARG(&plane_state->uapi.dst),
		   rot_str);
}
826 
827 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
828 {
829 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
830 	struct intel_plane *plane;
831 
832 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
833 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
834 			   plane->base.base.id, plane->base.name,
835 			   plane_type(plane->base.type));
836 		intel_plane_uapi_info(m, plane);
837 		intel_plane_hw_info(m, plane);
838 	}
839 }
840 
841 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
842 {
843 	const struct intel_crtc_state *crtc_state =
844 		to_intel_crtc_state(crtc->base.state);
845 	int num_scalers = crtc->num_scalers;
846 	int i;
847 
848 	/* Not all platformas have a scaler */
849 	if (num_scalers) {
850 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
851 			   num_scalers,
852 			   crtc_state->scaler_state.scaler_users,
853 			   crtc_state->scaler_state.scaler_id);
854 
855 		for (i = 0; i < num_scalers; i++) {
856 			const struct intel_scaler *sc =
857 				&crtc_state->scaler_state.scalers[i];
858 
859 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
860 				   i, yesno(sc->in_use), sc->mode);
861 		}
862 		seq_puts(m, "\n");
863 	} else {
864 		seq_puts(m, "\tNo scalers available on this platform\n");
865 	}
866 }
867 
/*
 * intel_crtc_info - print one CRTC's uapi and hw state, its scalers,
 * attached encoders and planes, and FIFO underrun reporting state.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_encoder *encoder;

	seq_printf(m, "[CRTC:%d:%s]:\n",
		   crtc->base.base.id, crtc->base.name);

	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
		   yesno(crtc_state->uapi.enable),
		   yesno(crtc_state->uapi.active),
		   DRM_MODE_ARG(&crtc_state->uapi.mode));

	/* hw details are only printed while the CRTC is enabled. */
	if (crtc_state->hw.enable) {
		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
			   yesno(crtc_state->hw.active),
			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));

		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
			   yesno(crtc_state->dither), crtc_state->pipe_bpp);

		intel_scaler_info(m, crtc);
	}

	/* bigjoiner pairs two pipes to drive a single output. */
	if (crtc_state->bigjoiner)
		seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
			   crtc_state->bigjoiner_linked_crtc->base.base.id,
			   crtc_state->bigjoiner_linked_crtc->base.name,
			   crtc_state->bigjoiner_slave ? "slave" : "master");

	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
				    crtc_state->uapi.encoder_mask)
		intel_encoder_info(m, crtc, encoder);

	intel_plane_info(m, crtc);

	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
		   yesno(!crtc->cpu_fifo_underrun_disabled),
		   yesno(!crtc->pch_fifo_underrun_disabled));
}
911 
912 static int i915_display_info(struct seq_file *m, void *unused)
913 {
914 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
915 	struct drm_device *dev = &dev_priv->drm;
916 	struct intel_crtc *crtc;
917 	struct drm_connector *connector;
918 	struct drm_connector_list_iter conn_iter;
919 	intel_wakeref_t wakeref;
920 
921 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
922 
923 	drm_modeset_lock_all(dev);
924 
925 	seq_printf(m, "CRTC info\n");
926 	seq_printf(m, "---------\n");
927 	for_each_intel_crtc(dev, crtc)
928 		intel_crtc_info(m, crtc);
929 
930 	seq_printf(m, "\n");
931 	seq_printf(m, "Connector info\n");
932 	seq_printf(m, "--------------\n");
933 	drm_connector_list_iter_begin(dev, &conn_iter);
934 	drm_for_each_connector_iter(connector, &conn_iter)
935 		intel_connector_info(m, connector);
936 	drm_connector_list_iter_end(&conn_iter);
937 
938 	drm_modeset_unlock_all(dev);
939 
940 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
941 
942 	return 0;
943 }
944 
/*
 * i915_shared_dplls_info - dump reference clocks and the tracked hardware
 * state of every shared DPLL. Not every register below exists on every
 * platform; unused fields print as zero.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	int i;

	drm_modeset_lock_all(dev);

	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
		   dev_priv->dpll.ref_clks.nssc,
		   dev_priv->dpll.ref_clks.ssc);

	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];

		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
			   pll->info->id);
		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_printf(m, " tracked hardware state:\n");
		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
		seq_printf(m, " dpll_md: 0x%08x\n",
			   pll->state.hw_state.dpll_md);
		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
			   pll->state.hw_state.mg_refclkin_ctl);
		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
			   pll->state.hw_state.mg_clktop2_coreclkctl1);
		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
			   pll->state.hw_state.mg_clktop2_hsclkctl);
		seq_printf(m, " mg_pll_div0:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div0);
		seq_printf(m, " mg_pll_div1:  0x%08x\n",
			   pll->state.hw_state.mg_pll_div1);
		seq_printf(m, " mg_pll_lf:    0x%08x\n",
			   pll->state.hw_state.mg_pll_lf);
		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
			   pll->state.hw_state.mg_pll_frac_lock);
		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
			   pll->state.hw_state.mg_pll_ssc);
		seq_printf(m, " mg_pll_bias:  0x%08x\n",
			   pll->state.hw_state.mg_pll_bias);
		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
	}
	drm_modeset_unlock_all(dev);

	return 0;
}
998 
999 static int i915_ipc_status_show(struct seq_file *m, void *data)
1000 {
1001 	struct drm_i915_private *dev_priv = m->private;
1002 
1003 	seq_printf(m, "Isochronous Priority Control: %s\n",
1004 			yesno(dev_priv->ipc_enabled));
1005 	return 0;
1006 }
1007 
1008 static int i915_ipc_status_open(struct inode *inode, struct file *file)
1009 {
1010 	struct drm_i915_private *dev_priv = inode->i_private;
1011 
1012 	if (!HAS_IPC(dev_priv))
1013 		return -ENODEV;
1014 
1015 	return single_open(file, i915_ipc_status_show, dev_priv);
1016 }
1017 
1018 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
1019 				     size_t len, loff_t *offp)
1020 {
1021 	struct seq_file *m = file->private_data;
1022 	struct drm_i915_private *dev_priv = m->private;
1023 	intel_wakeref_t wakeref;
1024 	bool enable;
1025 	int ret;
1026 
1027 	ret = kstrtobool_from_user(ubuf, len, &enable);
1028 	if (ret < 0)
1029 		return ret;
1030 
1031 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1032 		if (!dev_priv->ipc_enabled && enable)
1033 			drm_info(&dev_priv->drm,
1034 				 "Enabling IPC: WM will be proper only after next commit\n");
1035 		dev_priv->wm.distrust_bios_wm = true;
1036 		dev_priv->ipc_enabled = enable;
1037 		intel_enable_ipc(dev_priv);
1038 	}
1039 
1040 	return len;
1041 }
1042 
/* i915_ipc_status: read current IPC state, write a boolean to toggle it */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
1051 
1052 static int i915_ddb_info(struct seq_file *m, void *unused)
1053 {
1054 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1055 	struct drm_device *dev = &dev_priv->drm;
1056 	struct skl_ddb_entry *entry;
1057 	struct intel_crtc *crtc;
1058 
1059 	if (INTEL_GEN(dev_priv) < 9)
1060 		return -ENODEV;
1061 
1062 	drm_modeset_lock_all(dev);
1063 
1064 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1065 
1066 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1067 		struct intel_crtc_state *crtc_state =
1068 			to_intel_crtc_state(crtc->base.state);
1069 		enum pipe pipe = crtc->pipe;
1070 		enum plane_id plane_id;
1071 
1072 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1073 
1074 		for_each_plane_id_on_crtc(crtc, plane_id) {
1075 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1076 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1077 				   entry->start, entry->end,
1078 				   skl_ddb_entry_size(entry));
1079 		}
1080 
1081 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1082 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1083 			   entry->end, skl_ddb_entry_size(entry));
1084 	}
1085 
1086 	drm_modeset_unlock_all(dev);
1087 
1088 	return 0;
1089 }
1090 
/*
 * Print the DRRS (Dynamic Refresh Rate Switching) state for one CRTC:
 * which of its connectors support seamless DRRS, and - if DRRS is enabled
 * on the CRTC - the busy frontbuffer bits, the current high/low refresh
 * rate state and the corresponding vertical refresh rate.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		bool supported = false;

		/* Only report connectors driven by this CRTC */
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);

		/* Seamless DRRS is only reported for eDP connectors */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
		    drrs->type == SEAMLESS_DRRS_SUPPORT)
			supported = true;

		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
	}
	drm_connector_list_iter_end(&conn_iter);

	seq_puts(m, "\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->mutex protects drrs->dp and the refresh-rate state */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Enabled: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Enabled : No");
	}
	seq_puts(m, "\n");
}
1164 
1165 static int i915_drrs_status(struct seq_file *m, void *unused)
1166 {
1167 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1168 	struct drm_device *dev = &dev_priv->drm;
1169 	struct intel_crtc *intel_crtc;
1170 	int active_crtc_cnt = 0;
1171 
1172 	drm_modeset_lock_all(dev);
1173 	for_each_intel_crtc(dev, intel_crtc) {
1174 		if (intel_crtc->base.state->active) {
1175 			active_crtc_cnt++;
1176 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1177 
1178 			drrs_status_per_crtc(m, dev, intel_crtc);
1179 		}
1180 	}
1181 	drm_modeset_unlock_all(dev);
1182 
1183 	if (!active_crtc_cnt)
1184 		seq_puts(m, "No active crtc found\n");
1185 
1186 	return 0;
1187 }
1188 
/*
 * Emit the LPSP enabled/disabled state into the seq_file @m, which must be
 * in scope at the expansion site. The argument and the whole expansion are
 * parenthesized to avoid operator-precedence surprises when COND is a
 * compound expression.
 */
#define LPSP_STATUS(COND) ((COND) ? seq_puts(m, "LPSP: enabled\n") : \
				seq_puts(m, "LPSP: disabled\n"))
1191 
1192 static bool
1193 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1194 			      enum i915_power_well_id power_well_id)
1195 {
1196 	intel_wakeref_t wakeref;
1197 	bool is_enabled;
1198 
1199 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1200 	is_enabled = intel_display_power_well_is_enabled(i915,
1201 							 power_well_id);
1202 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1203 
1204 	return is_enabled;
1205 }
1206 
1207 static int i915_lpsp_status(struct seq_file *m, void *unused)
1208 {
1209 	struct drm_i915_private *i915 = node_to_i915(m->private);
1210 
1211 	switch (INTEL_GEN(i915)) {
1212 	case 12:
1213 	case 11:
1214 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1215 		break;
1216 	case 10:
1217 	case 9:
1218 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1219 		break;
1220 	default:
1221 		/*
1222 		 * Apart from HASWELL/BROADWELL other legacy platform doesn't
1223 		 * support lpsp.
1224 		 */
1225 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1226 			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1227 		else
1228 			seq_puts(m, "LPSP: not supported\n");
1229 	}
1230 
1231 	return 0;
1232 }
1233 
1234 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1235 {
1236 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1237 	struct drm_device *dev = &dev_priv->drm;
1238 	struct intel_encoder *intel_encoder;
1239 	struct intel_digital_port *dig_port;
1240 	struct drm_connector *connector;
1241 	struct drm_connector_list_iter conn_iter;
1242 
1243 	drm_connector_list_iter_begin(dev, &conn_iter);
1244 	drm_for_each_connector_iter(connector, &conn_iter) {
1245 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1246 			continue;
1247 
1248 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1249 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1250 			continue;
1251 
1252 		dig_port = enc_to_dig_port(intel_encoder);
1253 		if (!dig_port->dp.can_mst)
1254 			continue;
1255 
1256 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1257 			   dig_port->base.base.base.id,
1258 			   dig_port->base.base.name);
1259 		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1260 	}
1261 	drm_connector_list_iter_end(&conn_iter);
1262 
1263 	return 0;
1264 }
1265 
/*
 * Writing "1" arms DP compliance test handling on every connected DP
 * connector; any other integer disarms it. Used together with
 * i915_dp_test_data / i915_dp_test_type by DP compliance tooling.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy the user buffer into a NUL-terminated kernel string */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	drm_dbg(&to_i915(dev)->drm,
		"Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST streams are skipped; only SST DP ports are armed */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			drm_dbg(&to_i915(dev)->drm,
				"Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = true;
			else
				intel_dp->compliance.test_active = false;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
1326 
1327 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1328 {
1329 	struct drm_i915_private *dev_priv = m->private;
1330 	struct drm_device *dev = &dev_priv->drm;
1331 	struct drm_connector *connector;
1332 	struct drm_connector_list_iter conn_iter;
1333 	struct intel_dp *intel_dp;
1334 
1335 	drm_connector_list_iter_begin(dev, &conn_iter);
1336 	drm_for_each_connector_iter(connector, &conn_iter) {
1337 		struct intel_encoder *encoder;
1338 
1339 		if (connector->connector_type !=
1340 		    DRM_MODE_CONNECTOR_DisplayPort)
1341 			continue;
1342 
1343 		encoder = to_intel_encoder(connector->encoder);
1344 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1345 			continue;
1346 
1347 		if (encoder && connector->status == connector_status_connected) {
1348 			intel_dp = enc_to_intel_dp(encoder);
1349 			if (intel_dp->compliance.test_active)
1350 				seq_puts(m, "1");
1351 			else
1352 				seq_puts(m, "0");
1353 		} else
1354 			seq_puts(m, "0");
1355 	}
1356 	drm_connector_list_iter_end(&conn_iter);
1357 
1358 	return 0;
1359 }
1360 
/* Open handler wiring the seq_file show callback for i915_dp_test_active. */
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}
1367 
/* i915_dp_test_active: read per-connector state, write "1" to arm testing */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
1376 
/*
 * Dump DP compliance test data for every connected DP connector. The
 * output format depends on the requested test type: EDID read, video
 * pattern, or PHY test pattern.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST streams are skipped; only SST DP ports are dumped */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			} else if (intel_dp->compliance.test_type ==
				   DP_TEST_LINK_PHY_TEST_PATTERN) {
				seq_printf(m, "pattern: %d\n",
					   intel_dp->compliance.test_data.phytest.phy_pattern);
				seq_printf(m, "Number of lanes: %d\n",
					   intel_dp->compliance.test_data.phytest.num_lanes);
				seq_printf(m, "Link Rate: %d\n",
					   intel_dp->compliance.test_data.phytest.link_rate);
				seq_printf(m, "level: %02x\n",
					   intel_dp->train_set[0]);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1430 
1431 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1432 {
1433 	struct drm_i915_private *dev_priv = m->private;
1434 	struct drm_device *dev = &dev_priv->drm;
1435 	struct drm_connector *connector;
1436 	struct drm_connector_list_iter conn_iter;
1437 	struct intel_dp *intel_dp;
1438 
1439 	drm_connector_list_iter_begin(dev, &conn_iter);
1440 	drm_for_each_connector_iter(connector, &conn_iter) {
1441 		struct intel_encoder *encoder;
1442 
1443 		if (connector->connector_type !=
1444 		    DRM_MODE_CONNECTOR_DisplayPort)
1445 			continue;
1446 
1447 		encoder = to_intel_encoder(connector->encoder);
1448 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1449 			continue;
1450 
1451 		if (encoder && connector->status == connector_status_connected) {
1452 			intel_dp = enc_to_intel_dp(encoder);
1453 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1454 		} else
1455 			seq_puts(m, "0");
1456 	}
1457 	drm_connector_list_iter_end(&conn_iter);
1458 
1459 	return 0;
1460 }
1461 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1462 
1463 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1464 {
1465 	struct drm_i915_private *dev_priv = m->private;
1466 	struct drm_device *dev = &dev_priv->drm;
1467 	int level;
1468 	int num_levels;
1469 
1470 	if (IS_CHERRYVIEW(dev_priv))
1471 		num_levels = 3;
1472 	else if (IS_VALLEYVIEW(dev_priv))
1473 		num_levels = 1;
1474 	else if (IS_G4X(dev_priv))
1475 		num_levels = 3;
1476 	else
1477 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1478 
1479 	drm_modeset_lock_all(dev);
1480 
1481 	for (level = 0; level < num_levels; level++) {
1482 		unsigned int latency = wm[level];
1483 
1484 		/*
1485 		 * - WM1+ latency values in 0.5us units
1486 		 * - latencies are in us on gen9/vlv/chv
1487 		 */
1488 		if (INTEL_GEN(dev_priv) >= 9 ||
1489 		    IS_VALLEYVIEW(dev_priv) ||
1490 		    IS_CHERRYVIEW(dev_priv) ||
1491 		    IS_G4X(dev_priv))
1492 			latency *= 10;
1493 		else if (level > 0)
1494 			latency *= 5;
1495 
1496 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1497 			   level, wm[level], latency / 10, latency % 10);
1498 	}
1499 
1500 	drm_modeset_unlock_all(dev);
1501 }
1502 
1503 static int pri_wm_latency_show(struct seq_file *m, void *data)
1504 {
1505 	struct drm_i915_private *dev_priv = m->private;
1506 	const u16 *latencies;
1507 
1508 	if (INTEL_GEN(dev_priv) >= 9)
1509 		latencies = dev_priv->wm.skl_latency;
1510 	else
1511 		latencies = dev_priv->wm.pri_latency;
1512 
1513 	wm_latency_show(m, latencies);
1514 
1515 	return 0;
1516 }
1517 
1518 static int spr_wm_latency_show(struct seq_file *m, void *data)
1519 {
1520 	struct drm_i915_private *dev_priv = m->private;
1521 	const u16 *latencies;
1522 
1523 	if (INTEL_GEN(dev_priv) >= 9)
1524 		latencies = dev_priv->wm.skl_latency;
1525 	else
1526 		latencies = dev_priv->wm.spr_latency;
1527 
1528 	wm_latency_show(m, latencies);
1529 
1530 	return 0;
1531 }
1532 
1533 static int cur_wm_latency_show(struct seq_file *m, void *data)
1534 {
1535 	struct drm_i915_private *dev_priv = m->private;
1536 	const u16 *latencies;
1537 
1538 	if (INTEL_GEN(dev_priv) >= 9)
1539 		latencies = dev_priv->wm.skl_latency;
1540 	else
1541 		latencies = dev_priv->wm.cur_latency;
1542 
1543 	wm_latency_show(m, latencies);
1544 
1545 	return 0;
1546 }
1547 
1548 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1549 {
1550 	struct drm_i915_private *dev_priv = inode->i_private;
1551 
1552 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
1553 		return -ENODEV;
1554 
1555 	return single_open(file, pri_wm_latency_show, dev_priv);
1556 }
1557 
1558 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1559 {
1560 	struct drm_i915_private *dev_priv = inode->i_private;
1561 
1562 	if (HAS_GMCH(dev_priv))
1563 		return -ENODEV;
1564 
1565 	return single_open(file, spr_wm_latency_show, dev_priv);
1566 }
1567 
1568 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1569 {
1570 	struct drm_i915_private *dev_priv = inode->i_private;
1571 
1572 	if (HAS_GMCH(dev_priv))
1573 		return -ENODEV;
1574 
1575 	return single_open(file, cur_wm_latency_show, dev_priv);
1576 }
1577 
1578 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1579 				size_t len, loff_t *offp, u16 wm[8])
1580 {
1581 	struct seq_file *m = file->private_data;
1582 	struct drm_i915_private *dev_priv = m->private;
1583 	struct drm_device *dev = &dev_priv->drm;
1584 	u16 new[8] = { 0 };
1585 	int num_levels;
1586 	int level;
1587 	int ret;
1588 	char tmp[32];
1589 
1590 	if (IS_CHERRYVIEW(dev_priv))
1591 		num_levels = 3;
1592 	else if (IS_VALLEYVIEW(dev_priv))
1593 		num_levels = 1;
1594 	else if (IS_G4X(dev_priv))
1595 		num_levels = 3;
1596 	else
1597 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1598 
1599 	if (len >= sizeof(tmp))
1600 		return -EINVAL;
1601 
1602 	if (copy_from_user(tmp, ubuf, len))
1603 		return -EFAULT;
1604 
1605 	tmp[len] = '\0';
1606 
1607 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1608 		     &new[0], &new[1], &new[2], &new[3],
1609 		     &new[4], &new[5], &new[6], &new[7]);
1610 	if (ret != num_levels)
1611 		return -EINVAL;
1612 
1613 	drm_modeset_lock_all(dev);
1614 
1615 	for (level = 0; level < num_levels; level++)
1616 		wm[level] = new[level];
1617 
1618 	drm_modeset_unlock_all(dev);
1619 
1620 	return len;
1621 }
1622 
1623 
1624 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1625 				    size_t len, loff_t *offp)
1626 {
1627 	struct seq_file *m = file->private_data;
1628 	struct drm_i915_private *dev_priv = m->private;
1629 	u16 *latencies;
1630 
1631 	if (INTEL_GEN(dev_priv) >= 9)
1632 		latencies = dev_priv->wm.skl_latency;
1633 	else
1634 		latencies = dev_priv->wm.pri_latency;
1635 
1636 	return wm_latency_write(file, ubuf, len, offp, latencies);
1637 }
1638 
1639 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1640 				    size_t len, loff_t *offp)
1641 {
1642 	struct seq_file *m = file->private_data;
1643 	struct drm_i915_private *dev_priv = m->private;
1644 	u16 *latencies;
1645 
1646 	if (INTEL_GEN(dev_priv) >= 9)
1647 		latencies = dev_priv->wm.skl_latency;
1648 	else
1649 		latencies = dev_priv->wm.spr_latency;
1650 
1651 	return wm_latency_write(file, ubuf, len, offp, latencies);
1652 }
1653 
1654 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1655 				    size_t len, loff_t *offp)
1656 {
1657 	struct seq_file *m = file->private_data;
1658 	struct drm_i915_private *dev_priv = m->private;
1659 	u16 *latencies;
1660 
1661 	if (INTEL_GEN(dev_priv) >= 9)
1662 		latencies = dev_priv->wm.skl_latency;
1663 	else
1664 		latencies = dev_priv->wm.cur_latency;
1665 
1666 	return wm_latency_write(file, ubuf, len, offp, latencies);
1667 }
1668 
/* i915_pri_wm_latency: read/write primary-plane WM latency values */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
1677 
/* i915_spr_wm_latency: read/write sprite-plane WM latency values */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
1686 
/* i915_cur_wm_latency: read/write cursor-plane WM latency values */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
1695 
1696 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1697 {
1698 	struct drm_i915_private *dev_priv = m->private;
1699 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1700 
1701 	/* Synchronize with everything first in case there's been an HPD
1702 	 * storm, but we haven't finished handling it in the kernel yet
1703 	 */
1704 	intel_synchronize_irq(dev_priv);
1705 	flush_work(&dev_priv->hotplug.dig_port_work);
1706 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1707 
1708 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1709 	seq_printf(m, "Detected: %s\n",
1710 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1711 
1712 	return 0;
1713 }
1714 
1715 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1716 					const char __user *ubuf, size_t len,
1717 					loff_t *offp)
1718 {
1719 	struct seq_file *m = file->private_data;
1720 	struct drm_i915_private *dev_priv = m->private;
1721 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1722 	unsigned int new_threshold;
1723 	int i;
1724 	char *newline;
1725 	char tmp[16];
1726 
1727 	if (len >= sizeof(tmp))
1728 		return -EINVAL;
1729 
1730 	if (copy_from_user(tmp, ubuf, len))
1731 		return -EFAULT;
1732 
1733 	tmp[len] = '\0';
1734 
1735 	/* Strip newline, if any */
1736 	newline = strchr(tmp, '\n');
1737 	if (newline)
1738 		*newline = '\0';
1739 
1740 	if (strcmp(tmp, "reset") == 0)
1741 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1742 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1743 		return -EINVAL;
1744 
1745 	if (new_threshold > 0)
1746 		drm_dbg_kms(&dev_priv->drm,
1747 			    "Setting HPD storm detection threshold to %d\n",
1748 			    new_threshold);
1749 	else
1750 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1751 
1752 	spin_lock_irq(&dev_priv->irq_lock);
1753 	hotplug->hpd_storm_threshold = new_threshold;
1754 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1755 	for_each_hpd_pin(i)
1756 		hotplug->stats[i].count = 0;
1757 	spin_unlock_irq(&dev_priv->irq_lock);
1758 
1759 	/* Re-enable hpd immediately if we were in an irq storm */
1760 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1761 
1762 	return len;
1763 }
1764 
/* Open handler wiring the seq_file show callback for i915_hpd_storm_ctl. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
1769 
/* i915_hpd_storm_ctl: read status, write threshold (or "reset") */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
1778 
1779 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1780 {
1781 	struct drm_i915_private *dev_priv = m->private;
1782 
1783 	seq_printf(m, "Enabled: %s\n",
1784 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1785 
1786 	return 0;
1787 }
1788 
/* Open handler wiring the seq_file show callback for i915_hpd_short_storm_ctl. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
1795 
1796 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1797 					      const char __user *ubuf,
1798 					      size_t len, loff_t *offp)
1799 {
1800 	struct seq_file *m = file->private_data;
1801 	struct drm_i915_private *dev_priv = m->private;
1802 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1803 	char *newline;
1804 	char tmp[16];
1805 	int i;
1806 	bool new_state;
1807 
1808 	if (len >= sizeof(tmp))
1809 		return -EINVAL;
1810 
1811 	if (copy_from_user(tmp, ubuf, len))
1812 		return -EFAULT;
1813 
1814 	tmp[len] = '\0';
1815 
1816 	/* Strip newline, if any */
1817 	newline = strchr(tmp, '\n');
1818 	if (newline)
1819 		*newline = '\0';
1820 
1821 	/* Reset to the "default" state for this system */
1822 	if (strcmp(tmp, "reset") == 0)
1823 		new_state = !HAS_DP_MST(dev_priv);
1824 	else if (kstrtobool(tmp, &new_state) != 0)
1825 		return -EINVAL;
1826 
1827 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1828 		    new_state ? "En" : "Dis");
1829 
1830 	spin_lock_irq(&dev_priv->irq_lock);
1831 	hotplug->hpd_short_storm_enabled = new_state;
1832 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1833 	for_each_hpd_pin(i)
1834 		hotplug->stats[i].count = 0;
1835 	spin_unlock_irq(&dev_priv->irq_lock);
1836 
1837 	/* Re-enable hpd immediately if we were in an irq storm */
1838 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1839 
1840 	return len;
1841 }
1842 
/* i915_hpd_short_storm_ctl: read status, write boolean (or "reset") */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
1851 
/*
 * Manually enable (val != 0) or disable (val == 0) eDP DRRS on every
 * active CRTC that supports it. DRRS requires gen7+ hardware.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		/* Take the per-CRTC lock so crtc->base.state stays stable */
		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->hw.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Wait for any pending commit so the HW state has settled */
		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Skip connectors not part of this CRTC's state */
			if (!(crtc_state->uapi.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(to_intel_connector(connector));
			/* DRRS is only toggled on eDP outputs here */
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			drm_dbg(&dev_priv->drm,
				"Manually %sabling DRRS. %llu\n",
				val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(encoder);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
1922 
/*
 * Writing a truthy value re-arms FIFO underrun reporting on all active
 * pipes (reporting gets disabled after the first underrun to avoid log
 * spam) and resets the FBC underrun state. Writing a falsy value is a
 * no-op.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->uapi.commit;
		if (commit) {
			/* Let any pending commit fully land before re-arming */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->hw.active) {
			drm_dbg_kms(&dev_priv->drm,
				    "Re-arming FIFO underruns on pipe %c\n",
				    pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
1977 
/* i915_fifo_underrun_reset: write-only control, truthy value re-arms */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
1984 
/* Read-only display debugfs entries, registered via drm_debugfs_create_files() */
static const struct drm_info_list intel_display_debugfs_list[] = {
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_lpsp_status", i915_lpsp_status, 0},
};
2003 
/* Display debugfs entries with dedicated file_operations (mostly writable) */
static const struct {
	const char *name;
	const struct file_operations *fops;
} intel_display_debugfs_files[] = {
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
2022 
2023 void intel_display_debugfs_register(struct drm_i915_private *i915)
2024 {
2025 	struct drm_minor *minor = i915->drm.primary;
2026 	int i;
2027 
2028 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
2029 		debugfs_create_file(intel_display_debugfs_files[i].name,
2030 				    S_IRUGO | S_IWUSR,
2031 				    minor->debugfs_root,
2032 				    to_i915(minor->dev),
2033 				    intel_display_debugfs_files[i].fops);
2034 	}
2035 
2036 	drm_debugfs_create_files(intel_display_debugfs_list,
2037 				 ARRAY_SIZE(intel_display_debugfs_list),
2038 				 minor->debugfs_root, minor);
2039 }
2040 
2041 static int i915_panel_show(struct seq_file *m, void *data)
2042 {
2043 	struct drm_connector *connector = m->private;
2044 	struct intel_dp *intel_dp =
2045 		intel_attached_dp(to_intel_connector(connector));
2046 
2047 	if (connector->status != connector_status_connected)
2048 		return -ENODEV;
2049 
2050 	seq_printf(m, "Panel power up delay: %d\n",
2051 		   intel_dp->panel_power_up_delay);
2052 	seq_printf(m, "Panel power down delay: %d\n",
2053 		   intel_dp->panel_power_down_delay);
2054 	seq_printf(m, "Backlight on delay: %d\n",
2055 		   intel_dp->backlight_on_delay);
2056 	seq_printf(m, "Backlight off delay: %d\n",
2057 		   intel_dp->backlight_off_delay);
2058 
2059 	return 0;
2060 }
2061 DEFINE_SHOW_ATTRIBUTE(i915_panel);
2062 
2063 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2064 {
2065 	struct drm_connector *connector = m->private;
2066 	struct intel_connector *intel_connector = to_intel_connector(connector);
2067 
2068 	if (connector->status != connector_status_connected)
2069 		return -ENODEV;
2070 
2071 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2072 		   connector->base.id);
2073 	intel_hdcp_info(m, intel_connector);
2074 
2075 	return 0;
2076 }
2077 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2078 
/*
 * Print "LPSP: capable" or "LPSP: incapable" depending on COND; the
 * ternary over two seq_puts() calls is used as an expression statement.
 */
#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
				seq_puts(m, "LPSP: incapable\n"))

/*
 * Report whether this connector can run with LPSP, which depends on the
 * platform generation, the encoder's port and the connector type.
 * Platforms older than gen9 that are neither HSW nor BDW print nothing.
 */
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_encoder *encoder;

	encoder = intel_attached_encoder(to_intel_connector(connector));
	if (!encoder)
		return -ENODEV;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	switch (INTEL_GEN(i915)) {
	case 12:
		/*
		 * Actually TGL can drive LPSP on ports up to DDI_C,
		 * but there is no physically connected DDI_C on TGL SKUs;
		 * the driver doesn't even initialize the DDI_C port for gen12.
		 */
		LPSP_CAPABLE(encoder->port <= PORT_B);
		break;
	case 11:
		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
		break;
	case 10:
	case 9:
		LPSP_CAPABLE(encoder->port == PORT_A &&
			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
		break;
	default:
		/* Pre-gen9: only HSW/BDW eDP is considered. */
		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2123 
/*
 * Report DSC (Display Stream Compression) state for the connector:
 * whether DSC is enabled in the current CRTC state, whether the sink
 * advertises DSC support, and whether DSC has been force-enabled via
 * the debugfs write handler.  FEC sink support is additionally reported
 * for non-eDP sinks.
 *
 * Locking: takes connection_mutex and then the CRTC mutex through a
 * drm_modeset_acquire context; on -EDEADLK the whole sequence is backed
 * off and retried from the top.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Deadlock: drop all held locks and restart. */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		/* Nothing to show without a connected sink on an active CRTC. */
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = intel_attached_dp(to_intel_connector(connector));
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		/* NOTE(review): FEC is skipped for eDP — presumably N/A there. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
2182 
2183 static ssize_t i915_dsc_fec_support_write(struct file *file,
2184 					  const char __user *ubuf,
2185 					  size_t len, loff_t *offp)
2186 {
2187 	bool dsc_enable = false;
2188 	int ret;
2189 	struct drm_connector *connector =
2190 		((struct seq_file *)file->private_data)->private;
2191 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2192 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2193 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2194 
2195 	if (len == 0)
2196 		return 0;
2197 
2198 	drm_dbg(&i915->drm,
2199 		"Copied %zu bytes from user to force DSC\n", len);
2200 
2201 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2202 	if (ret < 0)
2203 		return ret;
2204 
2205 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2206 		(dsc_enable) ? "true" : "false");
2207 	intel_dp->force_dsc_en = dsc_enable;
2208 
2209 	*offp += len;
2210 	return len;
2211 }
2212 
2213 static int i915_dsc_fec_support_open(struct inode *inode,
2214 				     struct file *file)
2215 {
2216 	return single_open(file, i915_dsc_fec_support_show,
2217 			   inode->i_private);
2218 }
2219 
/*
 * File operations for "i915_dsc_fec_support": reads go through the
 * seq_file show handler; writes force-enable/disable DSC.
 */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
2228 
2229 /**
2230  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2231  * @connector: pointer to a registered drm_connector
2232  *
2233  * Cleanup will be done by drm_connector_unregister() through a call to
2234  * drm_debugfs_connector_remove().
2235  *
2236  * Returns 0 on success, negative error codes on error.
2237  */
2238 int intel_connector_debugfs_add(struct drm_connector *connector)
2239 {
2240 	struct dentry *root = connector->debugfs_entry;
2241 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2242 
2243 	/* The connector must have been registered beforehands. */
2244 	if (!root)
2245 		return -ENODEV;
2246 
2247 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2248 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
2249 				    connector, &i915_panel_fops);
2250 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
2251 				    connector, &i915_psr_sink_status_fops);
2252 	}
2253 
2254 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2255 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2256 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2257 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
2258 				    connector, &i915_hdcp_sink_capability_fops);
2259 	}
2260 
2261 	if (INTEL_GEN(dev_priv) >= 10 &&
2262 	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
2263 	      !to_intel_connector(connector)->mst_port) ||
2264 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2265 		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
2266 				    connector, &i915_dsc_fec_support_fops);
2267 
2268 	/* Legacy panels doesn't lpsp on any platform */
2269 	if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2270 	     IS_BROADWELL(dev_priv)) &&
2271 	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2272 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2273 	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2274 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2275 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2276 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2277 				    connector, &i915_lpsp_capability_fops);
2278 
2279 	return 0;
2280 }
2281