xref: /openbmc/linux/drivers/gpu/drm/i915/display/intel_display_debugfs.c (revision 15a1fbdcfb519c2bd291ed01c6c94e0b89537a77)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_types.h"
13 #include "intel_dp.h"
14 #include "intel_fbc.h"
15 #include "intel_hdcp.h"
16 #include "intel_hdmi.h"
17 #include "intel_pm.h"
18 #include "intel_psr.h"
19 #include "intel_sideband.h"
20 
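/*
 * Note: the entries defined below are registered against the DRM minor's
 * debugfs directory, so they typically show up under <debugfs>/dri/<minor>/,
 * e.g. /sys/kernel/debug/dri/0/i915_display_info (illustrative path, assuming
 * debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0).
 */
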
21 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
22 {
23 	return to_i915(node->minor->dev);
24 }
25 
26 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
27 {
28 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
29 
30 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
31 		   dev_priv->fb_tracking.busy_bits);
32 
33 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
34 		   dev_priv->fb_tracking.flip_bits);
35 
36 	return 0;
37 }
38 
39 static int i915_fbc_status(struct seq_file *m, void *unused)
40 {
41 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
42 	struct intel_fbc *fbc = &dev_priv->fbc;
43 	intel_wakeref_t wakeref;
44 
45 	if (!HAS_FBC(dev_priv))
46 		return -ENODEV;
47 
48 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
49 	mutex_lock(&fbc->lock);
50 
51 	if (intel_fbc_is_active(dev_priv))
52 		seq_puts(m, "FBC enabled\n");
53 	else
54 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
55 
56 	if (intel_fbc_is_active(dev_priv)) {
57 		u32 mask;
58 
59 		if (INTEL_GEN(dev_priv) >= 8)
60 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
61 		else if (INTEL_GEN(dev_priv) >= 7)
62 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
63 		else if (INTEL_GEN(dev_priv) >= 5)
64 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
65 		else if (IS_G4X(dev_priv))
66 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
67 		else
68 			mask = intel_de_read(dev_priv, FBC_STATUS) &
69 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
70 
71 		seq_printf(m, "Compressing: %s\n", yesno(mask));
72 	}
73 
74 	mutex_unlock(&fbc->lock);
75 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
76 
77 	return 0;
78 }
79 
80 static int i915_fbc_false_color_get(void *data, u64 *val)
81 {
82 	struct drm_i915_private *dev_priv = data;
83 
84 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
85 		return -ENODEV;
86 
87 	*val = dev_priv->fbc.false_color;
88 
89 	return 0;
90 }
91 
92 static int i915_fbc_false_color_set(void *data, u64 val)
93 {
94 	struct drm_i915_private *dev_priv = data;
95 	u32 reg;
96 
97 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
98 		return -ENODEV;
99 
100 	mutex_lock(&dev_priv->fbc.lock);
101 
102 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
103 	dev_priv->fbc.false_color = val;
104 
105 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
106 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
107 
108 	mutex_unlock(&dev_priv->fbc.lock);
109 	return 0;
110 }
111 
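/*
 * i915_fbc_false_color: boolean debug knob (gen7+ with FBC). A non-zero
 * write sets FBC_CTL_FALSE_COLOR in ILK_DPFC_CONTROL so FBC's compression
 * behaviour can be inspected visually, e.g. (illustrative path):
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 */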
112 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
113 			i915_fbc_false_color_get, i915_fbc_false_color_set,
114 			"%llu\n");
115 
116 static int i915_ips_status(struct seq_file *m, void *unused)
117 {
118 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
119 	intel_wakeref_t wakeref;
120 
121 	if (!HAS_IPS(dev_priv))
122 		return -ENODEV;
123 
124 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
125 
126 	seq_printf(m, "Enabled by kernel parameter: %s\n",
127 		   yesno(i915_modparams.enable_ips));
128 
129 	if (INTEL_GEN(dev_priv) >= 8) {
130 		seq_puts(m, "Currently: unknown\n");
131 	} else {
132 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
133 			seq_puts(m, "Currently: enabled\n");
134 		else
135 			seq_puts(m, "Currently: disabled\n");
136 	}
137 
138 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
139 
140 	return 0;
141 }
142 
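/*
 * Report whether legacy panel self-refresh is currently enabled by sampling
 * the platform specific enable bit; gen9+ has no single global SR status
 * bit, so nothing is read there.
 */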
143 static int i915_sr_status(struct seq_file *m, void *unused)
144 {
145 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
146 	intel_wakeref_t wakeref;
147 	bool sr_enabled = false;
148 
149 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
150 
151 	if (INTEL_GEN(dev_priv) >= 9)
152 		/* no global SR status; inspect per-plane WM */;
153 	else if (HAS_PCH_SPLIT(dev_priv))
154 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
155 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
156 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
157 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
158 	else if (IS_I915GM(dev_priv))
159 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
160 	else if (IS_PINEVIEW(dev_priv))
161 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
162 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
163 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
164 
165 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
166 
167 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
168 
169 	return 0;
170 }
171 
172 static int i915_opregion(struct seq_file *m, void *unused)
173 {
174 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
175 
176 	if (opregion->header)
177 		seq_write(m, opregion->header, OPREGION_SIZE);
178 
179 	return 0;
180 }
181 
182 static int i915_vbt(struct seq_file *m, void *unused)
183 {
184 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
185 
186 	if (opregion->vbt)
187 		seq_write(m, opregion->vbt, opregion->vbt_size);
188 
189 	return 0;
190 }
191 
192 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
193 {
194 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
195 	struct drm_device *dev = &dev_priv->drm;
196 	struct intel_framebuffer *fbdev_fb = NULL;
197 	struct drm_framebuffer *drm_fb;
198 
199 #ifdef CONFIG_DRM_FBDEV_EMULATION
200 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
201 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
202 
203 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
204 			   fbdev_fb->base.width,
205 			   fbdev_fb->base.height,
206 			   fbdev_fb->base.format->depth,
207 			   fbdev_fb->base.format->cpp[0] * 8,
208 			   fbdev_fb->base.modifier,
209 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
210 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
211 		seq_putc(m, '\n');
212 	}
213 #endif
214 
215 	mutex_lock(&dev->mode_config.fb_lock);
216 	drm_for_each_fb(drm_fb, dev) {
217 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
218 		if (fb == fbdev_fb)
219 			continue;
220 
221 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
222 			   fb->base.width,
223 			   fb->base.height,
224 			   fb->base.format->depth,
225 			   fb->base.format->cpp[0] * 8,
226 			   fb->base.modifier,
227 			   drm_framebuffer_read_refcount(&fb->base));
228 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
229 		seq_putc(m, '\n');
230 	}
231 	mutex_unlock(&dev->mode_config.fb_lock);
232 
233 	return 0;
234 }
235 
236 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
237 {
238 	u8 val;
239 	static const char * const sink_status[] = {
240 		"inactive",
241 		"transition to active, capture and display",
242 		"active, display from RFB",
243 		"active, capture and display on sink device timings",
244 		"transition to inactive, capture and display, timing re-sync",
245 		"reserved",
246 		"reserved",
247 		"sink internal error",
248 	};
249 	struct drm_connector *connector = m->private;
250 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
251 	struct intel_dp *intel_dp =
252 		intel_attached_dp(to_intel_connector(connector));
253 	int ret;
254 
255 	if (!CAN_PSR(dev_priv)) {
256 		seq_puts(m, "PSR Unsupported\n");
257 		return -ENODEV;
258 	}
259 
260 	if (connector->status != connector_status_connected)
261 		return -ENODEV;
262 
263 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
264 
265 	if (ret == 1) {
266 		const char *str = "unknown";
267 
268 		val &= DP_PSR_SINK_STATE_MASK;
269 		if (val < ARRAY_SIZE(sink_status))
270 			str = sink_status[val];
271 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
272 	} else {
273 		return ret;
274 	}
275 
276 	return 0;
277 }
278 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
279 
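/*
 * Decode the source PSR1/PSR2 hardware state machine state from
 * EDP_PSR_STATUS / EDP_PSR2_STATUS on the PSR transcoder and print it
 * alongside the raw register value.
 */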
280 static void
281 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
282 {
283 	u32 val, status_val;
284 	const char *status = "unknown";
285 
286 	if (dev_priv->psr.psr2_enabled) {
287 		static const char * const live_status[] = {
288 			"IDLE",
289 			"CAPTURE",
290 			"CAPTURE_FS",
291 			"SLEEP",
292 			"BUFON_FW",
293 			"ML_UP",
294 			"SU_STANDBY",
295 			"FAST_SLEEP",
296 			"DEEP_SLEEP",
297 			"BUF_ON",
298 			"TG_ON"
299 		};
300 		val = intel_de_read(dev_priv,
301 				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
302 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
303 			      EDP_PSR2_STATUS_STATE_SHIFT;
304 		if (status_val < ARRAY_SIZE(live_status))
305 			status = live_status[status_val];
306 	} else {
307 		static const char * const live_status[] = {
308 			"IDLE",
309 			"SRDONACK",
310 			"SRDENT",
311 			"BUFOFF",
312 			"BUFON",
313 			"AUXACK",
314 			"SRDOFFACK",
315 			"SRDENT_ON",
316 		};
317 		val = intel_de_read(dev_priv,
318 				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
319 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
320 			      EDP_PSR_STATUS_STATE_SHIFT;
321 		if (status_val < ARRAY_SIZE(live_status))
322 			status = live_status[status_val];
323 	}
324 
325 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
326 }
327 
328 static int i915_edp_psr_status(struct seq_file *m, void *data)
329 {
330 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
331 	struct i915_psr *psr = &dev_priv->psr;
332 	intel_wakeref_t wakeref;
333 	const char *status;
334 	bool enabled;
335 	u32 val;
336 
337 	if (!HAS_PSR(dev_priv))
338 		return -ENODEV;
339 
340 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
341 	if (psr->dp)
342 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
343 	seq_puts(m, "\n");
344 
345 	if (!psr->sink_support)
346 		return 0;
347 
348 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
349 	mutex_lock(&psr->lock);
350 
351 	if (psr->enabled)
352 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
353 	else
354 		status = "disabled";
355 	seq_printf(m, "PSR mode: %s\n", status);
356 
357 	if (!psr->enabled) {
358 		seq_printf(m, "PSR sink not reliable: %s\n",
359 			   yesno(psr->sink_not_reliable));
360 
361 		goto unlock;
362 	}
363 
364 	if (psr->psr2_enabled) {
365 		val = intel_de_read(dev_priv,
366 				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
367 		enabled = val & EDP_PSR2_ENABLE;
368 	} else {
369 		val = intel_de_read(dev_priv,
370 				    EDP_PSR_CTL(dev_priv->psr.transcoder));
371 		enabled = val & EDP_PSR_ENABLE;
372 	}
373 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
374 		   enableddisabled(enabled), val);
375 	psr_source_status(dev_priv, m);
376 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
377 		   psr->busy_frontbuffer_bits);
378 
379 	/*
380 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
381 	 */
382 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
383 		val = intel_de_read(dev_priv,
384 				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
385 		val &= EDP_PSR_PERF_CNT_MASK;
386 		seq_printf(m, "Performance counter: %u\n", val);
387 	}
388 
389 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
390 		seq_printf(m, "Last attempted entry at: %lld\n",
391 			   psr->last_entry_attempt);
392 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
393 	}
394 
395 	if (psr->psr2_enabled) {
396 		u32 su_frames_val[3];
397 		int frame;
398 
399 		/*
400 		 * Read all 3 registers beforehand to minimize the chance of
401 		 * crossing a frame boundary between register reads
402 		 */
403 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
404 			val = intel_de_read(dev_priv,
405 					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
406 			su_frames_val[frame / 3] = val;
407 		}
408 
409 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
410 
411 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
412 			u32 su_blocks;
413 
414 			su_blocks = su_frames_val[frame / 3] &
415 				    PSR2_SU_STATUS_MASK(frame);
416 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
417 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
418 		}
419 	}
420 
421 unlock:
422 	mutex_unlock(&psr->lock);
423 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
424 
425 	return 0;
426 }
427 
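/*
 * Write a PSR debug/mode mask that is handed to intel_psr_debug_set(); the
 * individual bit definitions (e.g. I915_PSR_DEBUG_IRQ, tested in
 * i915_edp_psr_status() above) live elsewhere in the driver.
 */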
428 static int
429 i915_edp_psr_debug_set(void *data, u64 val)
430 {
431 	struct drm_i915_private *dev_priv = data;
432 	intel_wakeref_t wakeref;
433 	int ret;
434 
435 	if (!CAN_PSR(dev_priv))
436 		return -ENODEV;
437 
438 	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
439 
440 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
441 
442 	ret = intel_psr_debug_set(dev_priv, val);
443 
444 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
445 
446 	return ret;
447 }
448 
449 static int
450 i915_edp_psr_debug_get(void *data, u64 *val)
451 {
452 	struct drm_i915_private *dev_priv = data;
453 
454 	if (!CAN_PSR(dev_priv))
455 		return -ENODEV;
456 
457 	*val = READ_ONCE(dev_priv->psr.debug);
458 	return 0;
459 }
460 
461 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
462 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
463 			"%llu\n");
464 
465 static int i915_power_domain_info(struct seq_file *m, void *unused)
466 {
467 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
468 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
469 	int i;
470 
471 	mutex_lock(&power_domains->lock);
472 
473 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
474 	for (i = 0; i < power_domains->power_well_count; i++) {
475 		struct i915_power_well *power_well;
476 		enum intel_display_power_domain power_domain;
477 
478 		power_well = &power_domains->power_wells[i];
479 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
480 			   power_well->count);
481 
482 		for_each_power_domain(power_domain, power_well->desc->domains)
483 			seq_printf(m, "  %-23s %d\n",
484 				 intel_display_power_domain_str(power_domain),
485 				 power_domains->domain_use_count[power_domain]);
486 	}
487 
488 	mutex_unlock(&power_domains->lock);
489 
490 	return 0;
491 }
492 
493 static int i915_dmc_info(struct seq_file *m, void *unused)
494 {
495 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
496 	intel_wakeref_t wakeref;
497 	struct intel_csr *csr;
498 	i915_reg_t dc5_reg, dc6_reg = {};
499 
500 	if (!HAS_CSR(dev_priv))
501 		return -ENODEV;
502 
503 	csr = &dev_priv->csr;
504 
505 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
506 
507 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
508 	seq_printf(m, "path: %s\n", csr->fw_path);
509 
510 	if (!csr->dmc_payload)
511 		goto out;
512 
513 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
514 		   CSR_VERSION_MINOR(csr->version));
515 
516 	if (INTEL_GEN(dev_priv) >= 12) {
517 		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
518 		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
519 		/*
520 		 * NOTE: DMC_DEBUG3 is a general purpose reg.
521 		 * According to B.Specs:49196 the DMC f/w reuses the DC5/6
522 		 * counter reg for DC3CO debugging and validation, but the
523 		 * TGL DMC f/w uses DMC_DEBUG3 as the DC3CO counter.
524 		 */
525 		seq_printf(m, "DC3CO count: %d\n",
526 			   intel_de_read(dev_priv, DMC_DEBUG3));
527 	} else {
528 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
529 						 SKL_CSR_DC3_DC5_COUNT;
530 		if (!IS_GEN9_LP(dev_priv))
531 			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
532 	}
533 
534 	seq_printf(m, "DC3 -> DC5 count: %d\n",
535 		   intel_de_read(dev_priv, dc5_reg));
536 	if (dc6_reg.reg)
537 		seq_printf(m, "DC5 -> DC6 count: %d\n",
538 			   intel_de_read(dev_priv, dc6_reg));
539 
540 out:
541 	seq_printf(m, "program base: 0x%08x\n",
542 		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
543 	seq_printf(m, "ssp base: 0x%08x\n",
544 		   intel_de_read(dev_priv, CSR_SSP_BASE));
545 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
546 
547 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
548 
549 	return 0;
550 }
551 
552 static void intel_seq_print_mode(struct seq_file *m, int tabs,
553 				 const struct drm_display_mode *mode)
554 {
555 	int i;
556 
557 	for (i = 0; i < tabs; i++)
558 		seq_putc(m, '\t');
559 
560 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
561 }
562 
563 static void intel_encoder_info(struct seq_file *m,
564 			       struct intel_crtc *crtc,
565 			       struct intel_encoder *encoder)
566 {
567 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
568 	struct drm_connector_list_iter conn_iter;
569 	struct drm_connector *connector;
570 
571 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
572 		   encoder->base.base.id, encoder->base.name);
573 
574 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
575 	drm_for_each_connector_iter(connector, &conn_iter) {
576 		const struct drm_connector_state *conn_state =
577 			connector->state;
578 
579 		if (conn_state->best_encoder != &encoder->base)
580 			continue;
581 
582 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
583 			   connector->base.id, connector->name);
584 	}
585 	drm_connector_list_iter_end(&conn_iter);
586 }
587 
588 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
589 {
590 	const struct drm_display_mode *mode = panel->fixed_mode;
591 
592 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
593 }
594 
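/* Print which HDCP versions (1.4 and/or 2.2) the connector reports as supported. */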
595 static void intel_hdcp_info(struct seq_file *m,
596 			    struct intel_connector *intel_connector)
597 {
598 	bool hdcp_cap, hdcp2_cap;
599 
600 	hdcp_cap = intel_hdcp_capable(intel_connector);
601 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
602 
603 	if (hdcp_cap)
604 		seq_puts(m, "HDCP1.4 ");
605 	if (hdcp2_cap)
606 		seq_puts(m, "HDCP2.2 ");
607 
608 	if (!hdcp_cap && !hdcp2_cap)
609 		seq_puts(m, "None");
610 
611 	seq_puts(m, "\n");
612 }
613 
614 static void intel_dp_info(struct seq_file *m,
615 			  struct intel_connector *intel_connector)
616 {
617 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
618 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
619 
620 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
621 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
622 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
623 		intel_panel_info(m, &intel_connector->panel);
624 
625 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
626 				&intel_dp->aux);
627 	if (intel_connector->hdcp.shim) {
628 		seq_puts(m, "\tHDCP version: ");
629 		intel_hdcp_info(m, intel_connector);
630 	}
631 }
632 
633 static void intel_dp_mst_info(struct seq_file *m,
634 			  struct intel_connector *intel_connector)
635 {
636 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
637 	struct intel_dp_mst_encoder *intel_mst =
638 		enc_to_mst(intel_encoder);
639 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
640 	struct intel_dp *intel_dp = &intel_dig_port->dp;
641 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
642 					intel_connector->port);
643 
644 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
645 }
646 
647 static void intel_hdmi_info(struct seq_file *m,
648 			    struct intel_connector *intel_connector)
649 {
650 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
651 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
652 
653 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
654 	if (intel_connector->hdcp.shim) {
655 		seq_puts(m, "\tHDCP version: ");
656 		intel_hdcp_info(m, intel_connector);
657 	}
658 }
659 
660 static void intel_lvds_info(struct seq_file *m,
661 			    struct intel_connector *intel_connector)
662 {
663 	intel_panel_info(m, &intel_connector->panel);
664 }
665 
666 static void intel_connector_info(struct seq_file *m,
667 				 struct drm_connector *connector)
668 {
669 	struct intel_connector *intel_connector = to_intel_connector(connector);
670 	const struct drm_connector_state *conn_state = connector->state;
671 	struct intel_encoder *encoder =
672 		to_intel_encoder(conn_state->best_encoder);
673 	const struct drm_display_mode *mode;
674 
675 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
676 		   connector->base.id, connector->name,
677 		   drm_get_connector_status_name(connector->status));
678 
679 	if (connector->status == connector_status_disconnected)
680 		return;
681 
682 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
683 		   connector->display_info.width_mm,
684 		   connector->display_info.height_mm);
685 	seq_printf(m, "\tsubpixel order: %s\n",
686 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
687 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
688 
689 	if (!encoder)
690 		return;
691 
692 	switch (connector->connector_type) {
693 	case DRM_MODE_CONNECTOR_DisplayPort:
694 	case DRM_MODE_CONNECTOR_eDP:
695 		if (encoder->type == INTEL_OUTPUT_DP_MST)
696 			intel_dp_mst_info(m, intel_connector);
697 		else
698 			intel_dp_info(m, intel_connector);
699 		break;
700 	case DRM_MODE_CONNECTOR_LVDS:
701 		if (encoder->type == INTEL_OUTPUT_LVDS)
702 			intel_lvds_info(m, intel_connector);
703 		break;
704 	case DRM_MODE_CONNECTOR_HDMIA:
705 		if (encoder->type == INTEL_OUTPUT_HDMI ||
706 		    encoder->type == INTEL_OUTPUT_DDI)
707 			intel_hdmi_info(m, intel_connector);
708 		break;
709 	default:
710 		break;
711 	}
712 
713 	seq_puts(m, "\tmodes:\n");
714 	list_for_each_entry(mode, &connector->modes, head)
715 		intel_seq_print_mode(m, 2, mode);
716 }
717 
718 static const char *plane_type(enum drm_plane_type type)
719 {
720 	switch (type) {
721 	case DRM_PLANE_TYPE_OVERLAY:
722 		return "OVL";
723 	case DRM_PLANE_TYPE_PRIMARY:
724 		return "PRI";
725 	case DRM_PLANE_TYPE_CURSOR:
726 		return "CUR";
727 	/*
728 	 * Deliberately omitting default: to generate compiler warnings
729 	 * when a new drm_plane_type gets added.
730 	 */
731 	}
732 
733 	return "unknown";
734 }
735 
736 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
737 {
738 	/*
739 	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
740 	 * but print them all so that any misused or combined values are visible
741 	 */
742 	snprintf(buf, bufsize,
743 		 "%s%s%s%s%s%s(0x%08x)",
744 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
745 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
746 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
747 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
748 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
749 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
750 		 rotation);
751 }
752 
753 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
754 {
755 	const struct intel_plane_state *plane_state =
756 		to_intel_plane_state(plane->base.state);
757 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
758 	struct drm_format_name_buf format_name;
759 	struct drm_rect src, dst;
760 	char rot_str[48];
761 
762 	src = drm_plane_state_src(&plane_state->uapi);
763 	dst = drm_plane_state_dest(&plane_state->uapi);
764 
765 	if (fb)
766 		drm_get_format_name(fb->format->format, &format_name);
767 
768 	plane_rotation(rot_str, sizeof(rot_str),
769 		       plane_state->uapi.rotation);
770 
771 	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
772 		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
773 		   fb ? fb->width : 0, fb ? fb->height : 0,
774 		   DRM_RECT_FP_ARG(&src),
775 		   DRM_RECT_ARG(&dst),
776 		   rot_str);
777 }
778 
779 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
780 {
781 	const struct intel_plane_state *plane_state =
782 		to_intel_plane_state(plane->base.state);
783 	const struct drm_framebuffer *fb = plane_state->hw.fb;
784 	struct drm_format_name_buf format_name;
785 	char rot_str[48];
786 
787 	if (!fb)
788 		return;
789 
790 	drm_get_format_name(fb->format->format, &format_name);
791 
792 	plane_rotation(rot_str, sizeof(rot_str),
793 		       plane_state->hw.rotation);
794 
795 	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
796 		   fb->base.id, format_name.str,
797 		   fb->width, fb->height,
798 		   yesno(plane_state->uapi.visible),
799 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
800 		   DRM_RECT_ARG(&plane_state->uapi.dst),
801 		   rot_str);
802 }
803 
804 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
805 {
806 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
807 	struct intel_plane *plane;
808 
809 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
810 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
811 			   plane->base.base.id, plane->base.name,
812 			   plane_type(plane->base.type));
813 		intel_plane_uapi_info(m, plane);
814 		intel_plane_hw_info(m, plane);
815 	}
816 }
817 
818 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
819 {
820 	const struct intel_crtc_state *crtc_state =
821 		to_intel_crtc_state(crtc->base.state);
822 	int num_scalers = crtc->num_scalers;
823 	int i;
824 
825 	/* Not all platforms have a scaler */
826 	if (num_scalers) {
827 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
828 			   num_scalers,
829 			   crtc_state->scaler_state.scaler_users,
830 			   crtc_state->scaler_state.scaler_id);
831 
832 		for (i = 0; i < num_scalers; i++) {
833 			const struct intel_scaler *sc =
834 				&crtc_state->scaler_state.scalers[i];
835 
836 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
837 				   i, yesno(sc->in_use), sc->mode);
838 		}
839 		seq_puts(m, "\n");
840 	} else {
841 		seq_puts(m, "\tNo scalers available on this platform\n");
842 	}
843 }
844 
845 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
846 {
847 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
848 	const struct intel_crtc_state *crtc_state =
849 		to_intel_crtc_state(crtc->base.state);
850 	struct intel_encoder *encoder;
851 
852 	seq_printf(m, "[CRTC:%d:%s]:\n",
853 		   crtc->base.base.id, crtc->base.name);
854 
855 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
856 		   yesno(crtc_state->uapi.enable),
857 		   yesno(crtc_state->uapi.active),
858 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
859 
860 	if (crtc_state->hw.enable) {
861 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
862 			   yesno(crtc_state->hw.active),
863 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
864 
865 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
866 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
867 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
868 
869 		intel_scaler_info(m, crtc);
870 	}
871 
872 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
873 				    crtc_state->uapi.encoder_mask)
874 		intel_encoder_info(m, crtc, encoder);
875 
876 	intel_plane_info(m, crtc);
877 
878 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
879 		   yesno(!crtc->cpu_fifo_underrun_disabled),
880 		   yesno(!crtc->pch_fifo_underrun_disabled));
881 }
882 
883 static int i915_display_info(struct seq_file *m, void *unused)
884 {
885 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
886 	struct drm_device *dev = &dev_priv->drm;
887 	struct intel_crtc *crtc;
888 	struct drm_connector *connector;
889 	struct drm_connector_list_iter conn_iter;
890 	intel_wakeref_t wakeref;
891 
892 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
893 
894 	drm_modeset_lock_all(dev);
895 
896 	seq_puts(m, "CRTC info\n");
897 	seq_puts(m, "---------\n");
898 	for_each_intel_crtc(dev, crtc)
899 		intel_crtc_info(m, crtc);
900 
901 	seq_putc(m, '\n');
902 	seq_puts(m, "Connector info\n");
903 	seq_puts(m, "--------------\n");
904 	drm_connector_list_iter_begin(dev, &conn_iter);
905 	drm_for_each_connector_iter(connector, &conn_iter)
906 		intel_connector_info(m, connector);
907 	drm_connector_list_iter_end(&conn_iter);
908 
909 	drm_modeset_unlock_all(dev);
910 
911 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
912 
913 	return 0;
914 }
915 
916 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
917 {
918 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
919 	struct drm_device *dev = &dev_priv->drm;
920 	int i;
921 
922 	drm_modeset_lock_all(dev);
923 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
924 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
925 
926 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
927 			   pll->info->id);
928 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
929 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
930 		seq_puts(m, " tracked hardware state:\n");
931 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
932 		seq_printf(m, " dpll_md: 0x%08x\n",
933 			   pll->state.hw_state.dpll_md);
934 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
935 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
936 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
937 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
938 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
939 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
940 			   pll->state.hw_state.mg_refclkin_ctl);
941 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
942 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
943 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
944 			   pll->state.hw_state.mg_clktop2_hsclkctl);
945 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
946 			   pll->state.hw_state.mg_pll_div0);
947 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
948 			   pll->state.hw_state.mg_pll_div1);
949 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
950 			   pll->state.hw_state.mg_pll_lf);
951 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
952 			   pll->state.hw_state.mg_pll_frac_lock);
953 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
954 			   pll->state.hw_state.mg_pll_ssc);
955 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
956 			   pll->state.hw_state.mg_pll_bias);
957 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
958 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
959 	}
960 	drm_modeset_unlock_all(dev);
961 
962 	return 0;
963 }
964 
965 static int i915_ipc_status_show(struct seq_file *m, void *data)
966 {
967 	struct drm_i915_private *dev_priv = m->private;
968 
969 	seq_printf(m, "Isochronous Priority Control: %s\n",
970 			yesno(dev_priv->ipc_enabled));
971 	return 0;
972 }
973 
974 static int i915_ipc_status_open(struct inode *inode, struct file *file)
975 {
976 	struct drm_i915_private *dev_priv = inode->i_private;
977 
978 	if (!HAS_IPC(dev_priv))
979 		return -ENODEV;
980 
981 	return single_open(file, i915_ipc_status_show, dev_priv);
982 }
983 
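/*
 * Writing a boolean toggles Isochronous Priority Control; watermarks are
 * only recomputed on the next atomic commit, hence the informational
 * message when enabling.
 */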
984 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
985 				     size_t len, loff_t *offp)
986 {
987 	struct seq_file *m = file->private_data;
988 	struct drm_i915_private *dev_priv = m->private;
989 	intel_wakeref_t wakeref;
990 	bool enable;
991 	int ret;
992 
993 	ret = kstrtobool_from_user(ubuf, len, &enable);
994 	if (ret < 0)
995 		return ret;
996 
997 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
998 		if (!dev_priv->ipc_enabled && enable)
999 			drm_info(&dev_priv->drm,
1000 				 "Enabling IPC: WM will be proper only after next commit\n");
1001 		dev_priv->wm.distrust_bios_wm = true;
1002 		dev_priv->ipc_enabled = enable;
1003 		intel_enable_ipc(dev_priv);
1004 	}
1005 
1006 	return len;
1007 }
1008 
1009 static const struct file_operations i915_ipc_status_fops = {
1010 	.owner = THIS_MODULE,
1011 	.open = i915_ipc_status_open,
1012 	.read = seq_read,
1013 	.llseek = seq_lseek,
1014 	.release = single_release,
1015 	.write = i915_ipc_status_write
1016 };
1017 
1018 static int i915_ddb_info(struct seq_file *m, void *unused)
1019 {
1020 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1021 	struct drm_device *dev = &dev_priv->drm;
1022 	struct skl_ddb_entry *entry;
1023 	struct intel_crtc *crtc;
1024 
1025 	if (INTEL_GEN(dev_priv) < 9)
1026 		return -ENODEV;
1027 
1028 	drm_modeset_lock_all(dev);
1029 
1030 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1031 
1032 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1033 		struct intel_crtc_state *crtc_state =
1034 			to_intel_crtc_state(crtc->base.state);
1035 		enum pipe pipe = crtc->pipe;
1036 		enum plane_id plane_id;
1037 
1038 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1039 
1040 		for_each_plane_id_on_crtc(crtc, plane_id) {
1041 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1042 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1043 				   entry->start, entry->end,
1044 				   skl_ddb_entry_size(entry));
1045 		}
1046 
1047 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1048 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1049 			   entry->end, skl_ddb_entry_size(entry));
1050 	}
1051 
1052 	drm_modeset_unlock_all(dev);
1053 
1054 	return 0;
1055 }
1056 
1057 static void drrs_status_per_crtc(struct seq_file *m,
1058 				 struct drm_device *dev,
1059 				 struct intel_crtc *intel_crtc)
1060 {
1061 	struct drm_i915_private *dev_priv = to_i915(dev);
1062 	struct i915_drrs *drrs = &dev_priv->drrs;
1063 	int vrefresh = 0;
1064 	struct drm_connector *connector;
1065 	struct drm_connector_list_iter conn_iter;
1066 
1067 	drm_connector_list_iter_begin(dev, &conn_iter);
1068 	drm_for_each_connector_iter(connector, &conn_iter) {
1069 		if (connector->state->crtc != &intel_crtc->base)
1070 			continue;
1071 
1072 		seq_printf(m, "%s:\n", connector->name);
1073 	}
1074 	drm_connector_list_iter_end(&conn_iter);
1075 
1076 	seq_puts(m, "\n");
1077 
1078 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1079 		struct intel_panel *panel;
1080 
1081 		mutex_lock(&drrs->mutex);
1082 		/* DRRS Supported */
1083 		seq_puts(m, "\tDRRS Supported: Yes\n");
1084 
1085 		/* disable_drrs() will make drrs->dp NULL */
1086 		if (!drrs->dp) {
1087 			seq_puts(m, "Idleness DRRS: Disabled\n");
1088 			if (dev_priv->psr.enabled)
1089 				seq_puts(m,
1090 				"\tAs PSR is enabled, DRRS is not enabled\n");
1091 			mutex_unlock(&drrs->mutex);
1092 			return;
1093 		}
1094 
1095 		panel = &drrs->dp->attached_connector->panel;
1096 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1097 					drrs->busy_frontbuffer_bits);
1098 
1099 		seq_puts(m, "\n\t\t");
1100 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1101 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1102 			vrefresh = panel->fixed_mode->vrefresh;
1103 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1104 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1105 			vrefresh = panel->downclock_mode->vrefresh;
1106 		} else {
1107 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1108 						drrs->refresh_rate_type);
1109 			mutex_unlock(&drrs->mutex);
1110 			return;
1111 		}
1112 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1113 
1114 		seq_puts(m, "\n\t\t");
1115 		mutex_unlock(&drrs->mutex);
1116 	} else {
1117 		/* DRRS not supported. Print the VBT parameter */
1118 		seq_puts(m, "\tDRRS Supported : No");
1119 	}
1120 	seq_puts(m, "\n");
1121 }
1122 
1123 static int i915_drrs_status(struct seq_file *m, void *unused)
1124 {
1125 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1126 	struct drm_device *dev = &dev_priv->drm;
1127 	struct intel_crtc *intel_crtc;
1128 	int active_crtc_cnt = 0;
1129 
1130 	drm_modeset_lock_all(dev);
1131 	for_each_intel_crtc(dev, intel_crtc) {
1132 		if (intel_crtc->base.state->active) {
1133 			active_crtc_cnt++;
1134 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1135 
1136 			drrs_status_per_crtc(m, dev, intel_crtc);
1137 		}
1138 	}
1139 	drm_modeset_unlock_all(dev);
1140 
1141 	if (!active_crtc_cnt)
1142 		seq_puts(m, "No active crtc found\n");
1143 
1144 	return 0;
1145 }
1146 
1147 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1148 {
1149 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1150 	struct drm_device *dev = &dev_priv->drm;
1151 	struct intel_encoder *intel_encoder;
1152 	struct intel_digital_port *intel_dig_port;
1153 	struct drm_connector *connector;
1154 	struct drm_connector_list_iter conn_iter;
1155 
1156 	drm_connector_list_iter_begin(dev, &conn_iter);
1157 	drm_for_each_connector_iter(connector, &conn_iter) {
1158 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1159 			continue;
1160 
1161 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1162 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1163 			continue;
1164 
1165 		intel_dig_port = enc_to_dig_port(intel_encoder);
1166 		if (!intel_dig_port->dp.can_mst)
1167 			continue;
1168 
1169 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1170 			   intel_dig_port->base.base.base.id,
1171 			   intel_dig_port->base.base.name);
1172 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
1173 	}
1174 	drm_connector_list_iter_end(&conn_iter);
1175 
1176 	return 0;
1177 }
1178 
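/*
 * DP compliance helper: sets or clears compliance.test_active on every
 * connected non-MST DisplayPort connector; only a literal "1" arms the
 * compliance test path.
 */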
1179 static ssize_t i915_displayport_test_active_write(struct file *file,
1180 						  const char __user *ubuf,
1181 						  size_t len, loff_t *offp)
1182 {
1183 	char *input_buffer;
1184 	int status = 0;
1185 	struct drm_device *dev;
1186 	struct drm_connector *connector;
1187 	struct drm_connector_list_iter conn_iter;
1188 	struct intel_dp *intel_dp;
1189 	int val = 0;
1190 
1191 	dev = ((struct seq_file *)file->private_data)->private;
1192 
1193 	if (len == 0)
1194 		return 0;
1195 
1196 	input_buffer = memdup_user_nul(ubuf, len);
1197 	if (IS_ERR(input_buffer))
1198 		return PTR_ERR(input_buffer);
1199 
1200 	drm_dbg(&to_i915(dev)->drm,
1201 		"Copied %d bytes from user\n", (unsigned int)len);
1202 
1203 	drm_connector_list_iter_begin(dev, &conn_iter);
1204 	drm_for_each_connector_iter(connector, &conn_iter) {
1205 		struct intel_encoder *encoder;
1206 
1207 		if (connector->connector_type !=
1208 		    DRM_MODE_CONNECTOR_DisplayPort)
1209 			continue;
1210 
1211 		encoder = to_intel_encoder(connector->encoder);
1212 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1213 			continue;
1214 
1215 		if (encoder && connector->status == connector_status_connected) {
1216 			intel_dp = enc_to_intel_dp(encoder);
1217 			status = kstrtoint(input_buffer, 10, &val);
1218 			if (status < 0)
1219 				break;
1220 			drm_dbg(&to_i915(dev)->drm,
1221 				"Got %d for test active\n", val);
1222 			/* To prevent erroneous activation of the compliance
1223 			 * testing code, only accept an actual value of 1 here
1224 			 */
1225 			if (val == 1)
1226 				intel_dp->compliance.test_active = true;
1227 			else
1228 				intel_dp->compliance.test_active = false;
1229 		}
1230 	}
1231 	drm_connector_list_iter_end(&conn_iter);
1232 	kfree(input_buffer);
1233 	if (status < 0)
1234 		return status;
1235 
1236 	*offp += len;
1237 	return len;
1238 }
1239 
1240 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1241 {
1242 	struct drm_i915_private *dev_priv = m->private;
1243 	struct drm_device *dev = &dev_priv->drm;
1244 	struct drm_connector *connector;
1245 	struct drm_connector_list_iter conn_iter;
1246 	struct intel_dp *intel_dp;
1247 
1248 	drm_connector_list_iter_begin(dev, &conn_iter);
1249 	drm_for_each_connector_iter(connector, &conn_iter) {
1250 		struct intel_encoder *encoder;
1251 
1252 		if (connector->connector_type !=
1253 		    DRM_MODE_CONNECTOR_DisplayPort)
1254 			continue;
1255 
1256 		encoder = to_intel_encoder(connector->encoder);
1257 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1258 			continue;
1259 
1260 		if (encoder && connector->status == connector_status_connected) {
1261 			intel_dp = enc_to_intel_dp(encoder);
1262 			if (intel_dp->compliance.test_active)
1263 				seq_puts(m, "1");
1264 			else
1265 				seq_puts(m, "0");
1266 		} else
1267 			seq_puts(m, "0");
1268 	}
1269 	drm_connector_list_iter_end(&conn_iter);
1270 
1271 	return 0;
1272 }
1273 
1274 static int i915_displayport_test_active_open(struct inode *inode,
1275 					     struct file *file)
1276 {
1277 	return single_open(file, i915_displayport_test_active_show,
1278 			   inode->i_private);
1279 }
1280 
1281 static const struct file_operations i915_displayport_test_active_fops = {
1282 	.owner = THIS_MODULE,
1283 	.open = i915_displayport_test_active_open,
1284 	.read = seq_read,
1285 	.llseek = seq_lseek,
1286 	.release = single_release,
1287 	.write = i915_displayport_test_active_write
1288 };
1289 
1290 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1291 {
1292 	struct drm_i915_private *dev_priv = m->private;
1293 	struct drm_device *dev = &dev_priv->drm;
1294 	struct drm_connector *connector;
1295 	struct drm_connector_list_iter conn_iter;
1296 	struct intel_dp *intel_dp;
1297 
1298 	drm_connector_list_iter_begin(dev, &conn_iter);
1299 	drm_for_each_connector_iter(connector, &conn_iter) {
1300 		struct intel_encoder *encoder;
1301 
1302 		if (connector->connector_type !=
1303 		    DRM_MODE_CONNECTOR_DisplayPort)
1304 			continue;
1305 
1306 		encoder = to_intel_encoder(connector->encoder);
1307 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1308 			continue;
1309 
1310 		if (encoder && connector->status == connector_status_connected) {
1311 			intel_dp = enc_to_intel_dp(encoder);
1312 			if (intel_dp->compliance.test_type ==
1313 			    DP_TEST_LINK_EDID_READ)
1314 				seq_printf(m, "%lx",
1315 					   intel_dp->compliance.test_data.edid);
1316 			else if (intel_dp->compliance.test_type ==
1317 				 DP_TEST_LINK_VIDEO_PATTERN) {
1318 				seq_printf(m, "hdisplay: %d\n",
1319 					   intel_dp->compliance.test_data.hdisplay);
1320 				seq_printf(m, "vdisplay: %d\n",
1321 					   intel_dp->compliance.test_data.vdisplay);
1322 				seq_printf(m, "bpc: %u\n",
1323 					   intel_dp->compliance.test_data.bpc);
1324 			}
1325 		} else
1326 			seq_puts(m, "0");
1327 	}
1328 	drm_connector_list_iter_end(&conn_iter);
1329 
1330 	return 0;
1331 }
1332 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1333 
1334 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1335 {
1336 	struct drm_i915_private *dev_priv = m->private;
1337 	struct drm_device *dev = &dev_priv->drm;
1338 	struct drm_connector *connector;
1339 	struct drm_connector_list_iter conn_iter;
1340 	struct intel_dp *intel_dp;
1341 
1342 	drm_connector_list_iter_begin(dev, &conn_iter);
1343 	drm_for_each_connector_iter(connector, &conn_iter) {
1344 		struct intel_encoder *encoder;
1345 
1346 		if (connector->connector_type !=
1347 		    DRM_MODE_CONNECTOR_DisplayPort)
1348 			continue;
1349 
1350 		encoder = to_intel_encoder(connector->encoder);
1351 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1352 			continue;
1353 
1354 		if (encoder && connector->status == connector_status_connected) {
1355 			intel_dp = enc_to_intel_dp(encoder);
1356 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
1357 		} else
1358 			seq_puts(m, "0");
1359 	}
1360 	drm_connector_list_iter_end(&conn_iter);
1361 
1362 	return 0;
1363 }
1364 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1365 
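/*
 * Print one line per watermark level, converting the raw latency table
 * values to microseconds according to the per-platform units handled below.
 */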
1366 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1367 {
1368 	struct drm_i915_private *dev_priv = m->private;
1369 	struct drm_device *dev = &dev_priv->drm;
1370 	int level;
1371 	int num_levels;
1372 
1373 	if (IS_CHERRYVIEW(dev_priv))
1374 		num_levels = 3;
1375 	else if (IS_VALLEYVIEW(dev_priv))
1376 		num_levels = 1;
1377 	else if (IS_G4X(dev_priv))
1378 		num_levels = 3;
1379 	else
1380 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1381 
1382 	drm_modeset_lock_all(dev);
1383 
1384 	for (level = 0; level < num_levels; level++) {
1385 		unsigned int latency = wm[level];
1386 
1387 		/*
1388 		 * - WM1+ latency values in 0.5us units
1389 		 * - latencies are in us on gen9/vlv/chv
1390 		 */
1391 		if (INTEL_GEN(dev_priv) >= 9 ||
1392 		    IS_VALLEYVIEW(dev_priv) ||
1393 		    IS_CHERRYVIEW(dev_priv) ||
1394 		    IS_G4X(dev_priv))
1395 			latency *= 10;
1396 		else if (level > 0)
1397 			latency *= 5;
1398 
1399 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1400 			   level, wm[level], latency / 10, latency % 10);
1401 	}
1402 
1403 	drm_modeset_unlock_all(dev);
1404 }
1405 
1406 static int pri_wm_latency_show(struct seq_file *m, void *data)
1407 {
1408 	struct drm_i915_private *dev_priv = m->private;
1409 	const u16 *latencies;
1410 
1411 	if (INTEL_GEN(dev_priv) >= 9)
1412 		latencies = dev_priv->wm.skl_latency;
1413 	else
1414 		latencies = dev_priv->wm.pri_latency;
1415 
1416 	wm_latency_show(m, latencies);
1417 
1418 	return 0;
1419 }
1420 
1421 static int spr_wm_latency_show(struct seq_file *m, void *data)
1422 {
1423 	struct drm_i915_private *dev_priv = m->private;
1424 	const u16 *latencies;
1425 
1426 	if (INTEL_GEN(dev_priv) >= 9)
1427 		latencies = dev_priv->wm.skl_latency;
1428 	else
1429 		latencies = dev_priv->wm.spr_latency;
1430 
1431 	wm_latency_show(m, latencies);
1432 
1433 	return 0;
1434 }
1435 
1436 static int cur_wm_latency_show(struct seq_file *m, void *data)
1437 {
1438 	struct drm_i915_private *dev_priv = m->private;
1439 	const u16 *latencies;
1440 
1441 	if (INTEL_GEN(dev_priv) >= 9)
1442 		latencies = dev_priv->wm.skl_latency;
1443 	else
1444 		latencies = dev_priv->wm.cur_latency;
1445 
1446 	wm_latency_show(m, latencies);
1447 
1448 	return 0;
1449 }
1450 
1451 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1452 {
1453 	struct drm_i915_private *dev_priv = inode->i_private;
1454 
1455 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
1456 		return -ENODEV;
1457 
1458 	return single_open(file, pri_wm_latency_show, dev_priv);
1459 }
1460 
1461 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1462 {
1463 	struct drm_i915_private *dev_priv = inode->i_private;
1464 
1465 	if (HAS_GMCH(dev_priv))
1466 		return -ENODEV;
1467 
1468 	return single_open(file, spr_wm_latency_show, dev_priv);
1469 }
1470 
1471 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1472 {
1473 	struct drm_i915_private *dev_priv = inode->i_private;
1474 
1475 	if (HAS_GMCH(dev_priv))
1476 		return -ENODEV;
1477 
1478 	return single_open(file, cur_wm_latency_show, dev_priv);
1479 }
1480 
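/*
 * Common write handler for the *_wm_latency files: expects exactly
 * num_levels space separated values in the platform's raw units, e.g. on a
 * platform with eight levels (illustrative):
 *   echo "2 4 8 16 32 64 96 128" > i915_pri_wm_latency
 */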
1481 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1482 				size_t len, loff_t *offp, u16 wm[8])
1483 {
1484 	struct seq_file *m = file->private_data;
1485 	struct drm_i915_private *dev_priv = m->private;
1486 	struct drm_device *dev = &dev_priv->drm;
1487 	u16 new[8] = { 0 };
1488 	int num_levels;
1489 	int level;
1490 	int ret;
1491 	char tmp[32];
1492 
1493 	if (IS_CHERRYVIEW(dev_priv))
1494 		num_levels = 3;
1495 	else if (IS_VALLEYVIEW(dev_priv))
1496 		num_levels = 1;
1497 	else if (IS_G4X(dev_priv))
1498 		num_levels = 3;
1499 	else
1500 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1501 
1502 	if (len >= sizeof(tmp))
1503 		return -EINVAL;
1504 
1505 	if (copy_from_user(tmp, ubuf, len))
1506 		return -EFAULT;
1507 
1508 	tmp[len] = '\0';
1509 
1510 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1511 		     &new[0], &new[1], &new[2], &new[3],
1512 		     &new[4], &new[5], &new[6], &new[7]);
1513 	if (ret != num_levels)
1514 		return -EINVAL;
1515 
1516 	drm_modeset_lock_all(dev);
1517 
1518 	for (level = 0; level < num_levels; level++)
1519 		wm[level] = new[level];
1520 
1521 	drm_modeset_unlock_all(dev);
1522 
1523 	return len;
1524 }
1525 
1526 
1527 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1528 				    size_t len, loff_t *offp)
1529 {
1530 	struct seq_file *m = file->private_data;
1531 	struct drm_i915_private *dev_priv = m->private;
1532 	u16 *latencies;
1533 
1534 	if (INTEL_GEN(dev_priv) >= 9)
1535 		latencies = dev_priv->wm.skl_latency;
1536 	else
1537 		latencies = dev_priv->wm.pri_latency;
1538 
1539 	return wm_latency_write(file, ubuf, len, offp, latencies);
1540 }
1541 
1542 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1543 				    size_t len, loff_t *offp)
1544 {
1545 	struct seq_file *m = file->private_data;
1546 	struct drm_i915_private *dev_priv = m->private;
1547 	u16 *latencies;
1548 
1549 	if (INTEL_GEN(dev_priv) >= 9)
1550 		latencies = dev_priv->wm.skl_latency;
1551 	else
1552 		latencies = dev_priv->wm.spr_latency;
1553 
1554 	return wm_latency_write(file, ubuf, len, offp, latencies);
1555 }
1556 
1557 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1558 				    size_t len, loff_t *offp)
1559 {
1560 	struct seq_file *m = file->private_data;
1561 	struct drm_i915_private *dev_priv = m->private;
1562 	u16 *latencies;
1563 
1564 	if (INTEL_GEN(dev_priv) >= 9)
1565 		latencies = dev_priv->wm.skl_latency;
1566 	else
1567 		latencies = dev_priv->wm.cur_latency;
1568 
1569 	return wm_latency_write(file, ubuf, len, offp, latencies);
1570 }
1571 
1572 static const struct file_operations i915_pri_wm_latency_fops = {
1573 	.owner = THIS_MODULE,
1574 	.open = pri_wm_latency_open,
1575 	.read = seq_read,
1576 	.llseek = seq_lseek,
1577 	.release = single_release,
1578 	.write = pri_wm_latency_write
1579 };
1580 
1581 static const struct file_operations i915_spr_wm_latency_fops = {
1582 	.owner = THIS_MODULE,
1583 	.open = spr_wm_latency_open,
1584 	.read = seq_read,
1585 	.llseek = seq_lseek,
1586 	.release = single_release,
1587 	.write = spr_wm_latency_write
1588 };
1589 
1590 static const struct file_operations i915_cur_wm_latency_fops = {
1591 	.owner = THIS_MODULE,
1592 	.open = cur_wm_latency_open,
1593 	.read = seq_read,
1594 	.llseek = seq_lseek,
1595 	.release = single_release,
1596 	.write = cur_wm_latency_write
1597 };
1598 
1599 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1600 {
1601 	struct drm_i915_private *dev_priv = m->private;
1602 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1603 
1604 	/* Synchronize with everything first in case there's been an HPD
1605 	 * storm, but we haven't finished handling it in the kernel yet
1606 	 */
1607 	intel_synchronize_irq(dev_priv);
1608 	flush_work(&dev_priv->hotplug.dig_port_work);
1609 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1610 
1611 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1612 	seq_printf(m, "Detected: %s\n",
1613 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1614 
1615 	return 0;
1616 }
1617 
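/*
 * Accepts a decimal threshold, or "reset" to restore
 * HPD_STORM_DEFAULT_THRESHOLD; a threshold of 0 disables HPD storm
 * detection. Per-pin HPD stats are cleared on every write.
 */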
1618 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1619 					const char __user *ubuf, size_t len,
1620 					loff_t *offp)
1621 {
1622 	struct seq_file *m = file->private_data;
1623 	struct drm_i915_private *dev_priv = m->private;
1624 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1625 	unsigned int new_threshold;
1626 	int i;
1627 	char *newline;
1628 	char tmp[16];
1629 
1630 	if (len >= sizeof(tmp))
1631 		return -EINVAL;
1632 
1633 	if (copy_from_user(tmp, ubuf, len))
1634 		return -EFAULT;
1635 
1636 	tmp[len] = '\0';
1637 
1638 	/* Strip newline, if any */
1639 	newline = strchr(tmp, '\n');
1640 	if (newline)
1641 		*newline = '\0';
1642 
1643 	if (strcmp(tmp, "reset") == 0)
1644 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1645 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1646 		return -EINVAL;
1647 
1648 	if (new_threshold > 0)
1649 		drm_dbg_kms(&dev_priv->drm,
1650 			    "Setting HPD storm detection threshold to %d\n",
1651 			    new_threshold);
1652 	else
1653 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1654 
1655 	spin_lock_irq(&dev_priv->irq_lock);
1656 	hotplug->hpd_storm_threshold = new_threshold;
1657 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1658 	for_each_hpd_pin(i)
1659 		hotplug->stats[i].count = 0;
1660 	spin_unlock_irq(&dev_priv->irq_lock);
1661 
1662 	/* Re-enable hpd immediately if we were in an irq storm */
1663 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1664 
1665 	return len;
1666 }
1667 
1668 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1669 {
1670 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1671 }
1672 
1673 static const struct file_operations i915_hpd_storm_ctl_fops = {
1674 	.owner = THIS_MODULE,
1675 	.open = i915_hpd_storm_ctl_open,
1676 	.read = seq_read,
1677 	.llseek = seq_lseek,
1678 	.release = single_release,
1679 	.write = i915_hpd_storm_ctl_write
1680 };
1681 
1682 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1683 {
1684 	struct drm_i915_private *dev_priv = m->private;
1685 
1686 	seq_printf(m, "Enabled: %s\n",
1687 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1688 
1689 	return 0;
1690 }
1691 
1692 static int
1693 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1694 {
1695 	return single_open(file, i915_hpd_short_storm_ctl_show,
1696 			   inode->i_private);
1697 }
1698 
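/*
 * Accepts a boolean, or "reset" to restore the per-platform default
 * (enabled only where DP MST is not supported). Per-pin HPD stats are
 * cleared on every write.
 */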
1699 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1700 					      const char __user *ubuf,
1701 					      size_t len, loff_t *offp)
1702 {
1703 	struct seq_file *m = file->private_data;
1704 	struct drm_i915_private *dev_priv = m->private;
1705 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1706 	char *newline;
1707 	char tmp[16];
1708 	int i;
1709 	bool new_state;
1710 
1711 	if (len >= sizeof(tmp))
1712 		return -EINVAL;
1713 
1714 	if (copy_from_user(tmp, ubuf, len))
1715 		return -EFAULT;
1716 
1717 	tmp[len] = '\0';
1718 
1719 	/* Strip newline, if any */
1720 	newline = strchr(tmp, '\n');
1721 	if (newline)
1722 		*newline = '\0';
1723 
1724 	/* Reset to the "default" state for this system */
1725 	if (strcmp(tmp, "reset") == 0)
1726 		new_state = !HAS_DP_MST(dev_priv);
1727 	else if (kstrtobool(tmp, &new_state) != 0)
1728 		return -EINVAL;
1729 
1730 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1731 		    new_state ? "En" : "Dis");
1732 
1733 	spin_lock_irq(&dev_priv->irq_lock);
1734 	hotplug->hpd_short_storm_enabled = new_state;
1735 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1736 	for_each_hpd_pin(i)
1737 		hotplug->stats[i].count = 0;
1738 	spin_unlock_irq(&dev_priv->irq_lock);
1739 
1740 	/* Re-enable hpd immediately if we were in an irq storm */
1741 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1742 
1743 	return len;
1744 }
1745 
1746 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1747 	.owner = THIS_MODULE,
1748 	.open = i915_hpd_short_storm_ctl_open,
1749 	.read = seq_read,
1750 	.llseek = seq_lseek,
1751 	.release = single_release,
1752 	.write = i915_hpd_short_storm_ctl_write,
1753 };
1754 
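/*
 * Manual DRRS override (gen7+): a non-zero write enables and zero disables
 * eDP DRRS on every active crtc whose current state supports DRRS.
 */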
1755 static int i915_drrs_ctl_set(void *data, u64 val)
1756 {
1757 	struct drm_i915_private *dev_priv = data;
1758 	struct drm_device *dev = &dev_priv->drm;
1759 	struct intel_crtc *crtc;
1760 
1761 	if (INTEL_GEN(dev_priv) < 7)
1762 		return -ENODEV;
1763 
1764 	for_each_intel_crtc(dev, crtc) {
1765 		struct drm_connector_list_iter conn_iter;
1766 		struct intel_crtc_state *crtc_state;
1767 		struct drm_connector *connector;
1768 		struct drm_crtc_commit *commit;
1769 		int ret;
1770 
1771 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
1772 		if (ret)
1773 			return ret;
1774 
1775 		crtc_state = to_intel_crtc_state(crtc->base.state);
1776 
1777 		if (!crtc_state->hw.active ||
1778 		    !crtc_state->has_drrs)
1779 			goto out;
1780 
1781 		commit = crtc_state->uapi.commit;
1782 		if (commit) {
1783 			ret = wait_for_completion_interruptible(&commit->hw_done);
1784 			if (ret)
1785 				goto out;
1786 		}
1787 
1788 		drm_connector_list_iter_begin(dev, &conn_iter);
1789 		drm_for_each_connector_iter(connector, &conn_iter) {
1790 			struct intel_encoder *encoder;
1791 			struct intel_dp *intel_dp;
1792 
1793 			if (!(crtc_state->uapi.connector_mask &
1794 			      drm_connector_mask(connector)))
1795 				continue;
1796 
1797 			encoder = intel_attached_encoder(to_intel_connector(connector));
1798 			if (encoder->type != INTEL_OUTPUT_EDP)
1799 				continue;
1800 
1801 			drm_dbg(&dev_priv->drm,
1802 				"Manually %sabling DRRS. %llu\n",
1803 				val ? "en" : "dis", val);
1804 
1805 			intel_dp = enc_to_intel_dp(encoder);
1806 			if (val)
1807 				intel_edp_drrs_enable(intel_dp,
1808 						      crtc_state);
1809 			else
1810 				intel_edp_drrs_disable(intel_dp,
1811 						       crtc_state);
1812 		}
1813 		drm_connector_list_iter_end(&conn_iter);
1814 
1815 out:
1816 		drm_modeset_unlock(&crtc->base.mutex);
1817 		if (ret)
1818 			return ret;
1819 	}
1820 
1821 	return 0;
1822 }
1823 
1824 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
1825 
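/*
 * Writing a truthy value re-arms FIFO underrun reporting on all active
 * pipes (after waiting for any pending commit to land) and resets the FBC
 * underrun counter; a falsy value is accepted but does nothing.
 */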
1826 static ssize_t
1827 i915_fifo_underrun_reset_write(struct file *filp,
1828 			       const char __user *ubuf,
1829 			       size_t cnt, loff_t *ppos)
1830 {
1831 	struct drm_i915_private *dev_priv = filp->private_data;
1832 	struct intel_crtc *intel_crtc;
1833 	struct drm_device *dev = &dev_priv->drm;
1834 	int ret;
1835 	bool reset;
1836 
1837 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
1838 	if (ret)
1839 		return ret;
1840 
1841 	if (!reset)
1842 		return cnt;
1843 
1844 	for_each_intel_crtc(dev, intel_crtc) {
1845 		struct drm_crtc_commit *commit;
1846 		struct intel_crtc_state *crtc_state;
1847 
1848 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
1849 		if (ret)
1850 			return ret;
1851 
1852 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
1853 		commit = crtc_state->uapi.commit;
1854 		if (commit) {
1855 			ret = wait_for_completion_interruptible(&commit->hw_done);
1856 			if (!ret)
1857 				ret = wait_for_completion_interruptible(&commit->flip_done);
1858 		}
1859 
1860 		if (!ret && crtc_state->hw.active) {
1861 			drm_dbg_kms(&dev_priv->drm,
1862 				    "Re-arming FIFO underruns on pipe %c\n",
1863 				    pipe_name(intel_crtc->pipe));
1864 
1865 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
1866 		}
1867 
1868 		drm_modeset_unlock(&intel_crtc->base.mutex);
1869 
1870 		if (ret)
1871 			return ret;
1872 	}
1873 
1874 	ret = intel_fbc_reset_underrun(dev_priv);
1875 	if (ret)
1876 		return ret;
1877 
1878 	return cnt;
1879 }
1880 
1881 static const struct file_operations i915_fifo_underrun_reset_ops = {
1882 	.owner = THIS_MODULE,
1883 	.open = simple_open,
1884 	.write = i915_fifo_underrun_reset_write,
1885 	.llseek = default_llseek,
1886 };
1887 
1888 static const struct drm_info_list intel_display_debugfs_list[] = {
1889 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
1890 	{"i915_fbc_status", i915_fbc_status, 0},
1891 	{"i915_ips_status", i915_ips_status, 0},
1892 	{"i915_sr_status", i915_sr_status, 0},
1893 	{"i915_opregion", i915_opregion, 0},
1894 	{"i915_vbt", i915_vbt, 0},
1895 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1896 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
1897 	{"i915_power_domain_info", i915_power_domain_info, 0},
1898 	{"i915_dmc_info", i915_dmc_info, 0},
1899 	{"i915_display_info", i915_display_info, 0},
1900 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
1901 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
1902 	{"i915_ddb_info", i915_ddb_info, 0},
1903 	{"i915_drrs_status", i915_drrs_status, 0},
1904 };
1905 
1906 static const struct {
1907 	const char *name;
1908 	const struct file_operations *fops;
1909 } intel_display_debugfs_files[] = {
1910 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
1911 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
1912 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
1913 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
1914 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
1915 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
1916 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
1917 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
1918 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
1919 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
1920 	{"i915_ipc_status", &i915_ipc_status_fops},
1921 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
1922 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
1923 };
1924 
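/*
 * Create the display-related debugfs nodes: the read-only seq_file entries
 * in intel_display_debugfs_list and the writable control files in
 * intel_display_debugfs_files, all under the primary minor's debugfs root.
 */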
1925 int intel_display_debugfs_register(struct drm_i915_private *i915)
1926 {
1927 	struct drm_minor *minor = i915->drm.primary;
1928 	int i;
1929 
1930 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
1931 		debugfs_create_file(intel_display_debugfs_files[i].name,
1932 				    0644,
1933 				    minor->debugfs_root,
1934 				    to_i915(minor->dev),
1935 				    intel_display_debugfs_files[i].fops);
1936 	}
1937 
1938 	return drm_debugfs_create_files(intel_display_debugfs_list,
1939 					ARRAY_SIZE(intel_display_debugfs_list),
1940 					minor->debugfs_root, minor);
1941 }
1942 
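/* Dump the eDP panel power sequencing and backlight on/off delays. */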
1943 static int i915_panel_show(struct seq_file *m, void *data)
1944 {
1945 	struct drm_connector *connector = m->private;
1946 	struct intel_dp *intel_dp =
1947 		intel_attached_dp(to_intel_connector(connector));
1948 
1949 	if (connector->status != connector_status_connected)
1950 		return -ENODEV;
1951 
1952 	seq_printf(m, "Panel power up delay: %d\n",
1953 		   intel_dp->panel_power_up_delay);
1954 	seq_printf(m, "Panel power down delay: %d\n",
1955 		   intel_dp->panel_power_down_delay);
1956 	seq_printf(m, "Backlight on delay: %d\n",
1957 		   intel_dp->backlight_on_delay);
1958 	seq_printf(m, "Backlight off delay: %d\n",
1959 		   intel_dp->backlight_off_delay);
1960 
1961 	return 0;
1962 }
1963 DEFINE_SHOW_ATTRIBUTE(i915_panel);
1964 
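/* Report which HDCP versions the connected sink supports. */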
1965 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
1966 {
1967 	struct drm_connector *connector = m->private;
1968 	struct intel_connector *intel_connector = to_intel_connector(connector);
1969 
1970 	if (connector->status != connector_status_connected)
1971 		return -ENODEV;
1972 
1973 	/* Bail out if the connector does not support HDCP */
1974 	if (!intel_connector->hdcp.shim)
1975 		return -EINVAL;
1976 
1977 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
1978 		   connector->base.id);
1979 	intel_hdcp_info(m, intel_connector);
1980 
1981 	return 0;
1982 }
1983 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
1984 
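/*
 * Report the DSC state for this connector: whether DSC is enabled on the
 * current CRTC, whether the sink supports DSC (and FEC for non-eDP), and
 * whether DSC has been force-enabled through debugfs.
 */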
1985 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
1986 {
1987 	struct drm_connector *connector = m->private;
1988 	struct drm_device *dev = connector->dev;
1989 	struct drm_crtc *crtc;
1990 	struct intel_dp *intel_dp;
1991 	struct drm_modeset_acquire_ctx ctx;
1992 	struct intel_crtc_state *crtc_state = NULL;
1993 	int ret = 0;
1994 	bool try_again = false;
1995 
1996 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1997 
1998 	do {
1999 		try_again = false;
2000 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2001 				       &ctx);
2002 		if (ret) {
2003 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2004 				try_again = true;
2005 				continue;
2006 			}
2007 			break;
2008 		}
2009 		crtc = connector->state->crtc;
2010 		if (connector->status != connector_status_connected || !crtc) {
2011 			ret = -ENODEV;
2012 			break;
2013 		}
2014 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2015 		if (ret == -EDEADLK) {
2016 			ret = drm_modeset_backoff(&ctx);
2017 			if (!ret) {
2018 				try_again = true;
2019 				continue;
2020 			}
2021 			break;
2022 		} else if (ret) {
2023 			break;
2024 		}
2025 		intel_dp = intel_attached_dp(to_intel_connector(connector));
2026 		crtc_state = to_intel_crtc_state(crtc->state);
2027 		seq_printf(m, "DSC_Enabled: %s\n",
2028 			   yesno(crtc_state->dsc.compression_enable));
2029 		seq_printf(m, "DSC_Sink_Support: %s\n",
2030 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2031 		seq_printf(m, "Force_DSC_Enable: %s\n",
2032 			   yesno(intel_dp->force_dsc_en));
2033 		if (!intel_dp_is_edp(intel_dp))
2034 			seq_printf(m, "FEC_Sink_Support: %s\n",
2035 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2036 	} while (try_again);
2037 
2038 	drm_modeset_drop_locks(&ctx);
2039 	drm_modeset_acquire_fini(&ctx);
2040 
2041 	return ret;
2042 }
2043 
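/*
 * Accepts a boolean; when set, DSC is force-enabled for this connector
 * (intel_dp->force_dsc_en) and takes effect on the next modeset.
 */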
2044 static ssize_t i915_dsc_fec_support_write(struct file *file,
2045 					  const char __user *ubuf,
2046 					  size_t len, loff_t *offp)
2047 {
2048 	bool dsc_enable = false;
2049 	int ret;
2050 	struct drm_connector *connector =
2051 		((struct seq_file *)file->private_data)->private;
2052 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2053 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2054 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2055 
2056 	if (len == 0)
2057 		return 0;
2058 
2059 	drm_dbg(&i915->drm,
2060 		"Copied %zu bytes from user to force DSC\n", len);
2061 
2062 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2063 	if (ret < 0)
2064 		return ret;
2065 
2066 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2067 		(dsc_enable) ? "true" : "false");
2068 	intel_dp->force_dsc_en = dsc_enable;
2069 
2070 	*offp += len;
2071 	return len;
2072 }
2073 
2074 static int i915_dsc_fec_support_open(struct inode *inode,
2075 				     struct file *file)
2076 {
2077 	return single_open(file, i915_dsc_fec_support_show,
2078 			   inode->i_private);
2079 }
2080 
2081 static const struct file_operations i915_dsc_fec_support_fops = {
2082 	.owner = THIS_MODULE,
2083 	.open = i915_dsc_fec_support_open,
2084 	.read = seq_read,
2085 	.llseek = seq_lseek,
2086 	.release = single_release,
2087 	.write = i915_dsc_fec_support_write,
2088 };
2089 
2090 /**
2091  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2092  * @connector: pointer to a registered drm_connector
2093  *
2094  * Cleanup will be done by drm_connector_unregister() through a call to
2095  * drm_debugfs_connector_remove().
2096  *
2097  * Returns 0 on success, negative error codes on error.
2098  */
2099 int intel_connector_debugfs_add(struct drm_connector *connector)
2100 {
2101 	struct dentry *root = connector->debugfs_entry;
2102 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2103 
2104 	/* The connector must have been registered beforehand. */
2105 	if (!root)
2106 		return -ENODEV;
2107 
2108 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2109 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
2110 				    connector, &i915_panel_fops);
2111 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
2112 				    connector, &i915_psr_sink_status_fops);
2113 	}
2114 
2115 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2116 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2117 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2118 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
2119 				    connector, &i915_hdcp_sink_capability_fops);
2120 	}
2121 
2122 	if (INTEL_GEN(dev_priv) >= 10 &&
2123 	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2124 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2125 		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
2126 				    connector, &i915_dsc_fec_support_fops);
2127 
2128 	return 0;
2129 }
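
/*
 * Usage sketch (illustrative only, not part of the driver): the files
 * created by intel_connector_debugfs_add() land in the connector's debugfs
 * directory, /sys/kernel/debug/dri/<minor>/<connector-name>/.  The minimal
 * userspace reader below assumes the default debugfs mount point, DRM
 * minor 0 and a connector named "eDP-1"; adjust both for the actual system.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		const char *path =
 *			"/sys/kernel/debug/dri/0/eDP-1/i915_panel_timings";
 *		char line[256];
 *		FILE *f = fopen(path, "r");
 *
 *		if (!f)
 *			return 1;	/* debugfs not mounted or no access */
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	/* power/backlight delays */
 *		fclose(f);
 *		return 0;
 *	}
 */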
2130