1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_de.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_display_types.h"
14 #include "intel_dmc.h"
15 #include "intel_dp.h"
16 #include "intel_dp_mst.h"
17 #include "intel_drrs.h"
18 #include "intel_fbc.h"
19 #include "intel_fbdev.h"
20 #include "intel_hdcp.h"
21 #include "intel_hdmi.h"
22 #include "intel_pm.h"
23 #include "intel_psr.h"
24 #include "intel_sprite.h"
25 
26 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
27 {
28 	return to_i915(node->minor->dev);
29 }
30 
31 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
32 {
33 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
34 
35 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
36 		   dev_priv->fb_tracking.busy_bits);
37 
38 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
39 		   dev_priv->fb_tracking.flip_bits);
40 
41 	return 0;
42 }
43 
44 static int i915_ips_status(struct seq_file *m, void *unused)
45 {
46 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
47 	intel_wakeref_t wakeref;
48 
49 	if (!HAS_IPS(dev_priv))
50 		return -ENODEV;
51 
52 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
53 
54 	seq_printf(m, "Enabled by kernel parameter: %s\n",
55 		   yesno(dev_priv->params.enable_ips));
56 
57 	if (DISPLAY_VER(dev_priv) >= 8) {
58 		seq_puts(m, "Currently: unknown\n");
59 	} else {
60 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
61 			seq_puts(m, "Currently: enabled\n");
62 		else
63 			seq_puts(m, "Currently: disabled\n");
64 	}
65 
66 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
67 
68 	return 0;
69 }
70 
71 static int i915_sr_status(struct seq_file *m, void *unused)
72 {
73 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
74 	intel_wakeref_t wakeref;
75 	bool sr_enabled = false;
76 
77 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
78 
79 	if (DISPLAY_VER(dev_priv) >= 9)
80 		/* no global SR status; inspect per-plane WM */;
81 	else if (HAS_PCH_SPLIT(dev_priv))
82 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM_LP_ENABLE;
83 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
84 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
85 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
86 	else if (IS_I915GM(dev_priv))
87 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
88 	else if (IS_PINEVIEW(dev_priv))
89 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
90 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
91 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
92 
93 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
94 
95 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
96 
97 	return 0;
98 }
99 
100 static int i915_opregion(struct seq_file *m, void *unused)
101 {
102 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
103 
104 	if (opregion->header)
105 		seq_write(m, opregion->header, OPREGION_SIZE);
106 
107 	return 0;
108 }
109 
110 static int i915_vbt(struct seq_file *m, void *unused)
111 {
112 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
113 
114 	if (opregion->vbt)
115 		seq_write(m, opregion->vbt, opregion->vbt_size);
116 
117 	return 0;
118 }
119 
120 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
121 {
122 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
123 	struct drm_device *dev = &dev_priv->drm;
124 	struct intel_framebuffer *fbdev_fb = NULL;
125 	struct drm_framebuffer *drm_fb;
126 
127 #ifdef CONFIG_DRM_FBDEV_EMULATION
128 	fbdev_fb = intel_fbdev_framebuffer(dev_priv->fbdev);
129 	if (fbdev_fb) {
130 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
131 			   fbdev_fb->base.width,
132 			   fbdev_fb->base.height,
133 			   fbdev_fb->base.format->depth,
134 			   fbdev_fb->base.format->cpp[0] * 8,
135 			   fbdev_fb->base.modifier,
136 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
137 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
138 		seq_putc(m, '\n');
139 	}
140 #endif
141 
142 	mutex_lock(&dev->mode_config.fb_lock);
143 	drm_for_each_fb(drm_fb, dev) {
144 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
145 		if (fb == fbdev_fb)
146 			continue;
147 
148 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
149 			   fb->base.width,
150 			   fb->base.height,
151 			   fb->base.format->depth,
152 			   fb->base.format->cpp[0] * 8,
153 			   fb->base.modifier,
154 			   drm_framebuffer_read_refcount(&fb->base));
155 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
156 		seq_putc(m, '\n');
157 	}
158 	mutex_unlock(&dev->mode_config.fb_lock);
159 
160 	return 0;
161 }
162 
163 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
164 {
165 	u8 val;
166 	static const char * const sink_status[] = {
167 		"inactive",
168 		"transition to active, capture and display",
169 		"active, display from RFB",
170 		"active, capture and display on sink device timings",
171 		"transition to inactive, capture and display, timing re-sync",
172 		"reserved",
173 		"reserved",
174 		"sink internal error",
175 	};
176 	struct drm_connector *connector = m->private;
177 	struct intel_dp *intel_dp =
178 		intel_attached_dp(to_intel_connector(connector));
179 	int ret;
180 
181 	if (!CAN_PSR(intel_dp)) {
182 		seq_puts(m, "PSR Unsupported\n");
183 		return -ENODEV;
184 	}
185 
186 	if (connector->status != connector_status_connected)
187 		return -ENODEV;
188 
189 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
190 
191 	if (ret == 1) {
192 		const char *str = "unknown";
193 
194 		val &= DP_PSR_SINK_STATE_MASK;
195 		if (val < ARRAY_SIZE(sink_status))
196 			str = sink_status[val];
197 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
198 	} else {
199 		return ret;
200 	}
201 
202 	return 0;
203 }
204 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
205 
206 static void
207 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
208 {
209 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
210 	const char *status = "unknown";
211 	u32 val, status_val;
212 
213 	if (intel_dp->psr.psr2_enabled) {
214 		static const char * const live_status[] = {
215 			"IDLE",
216 			"CAPTURE",
217 			"CAPTURE_FS",
218 			"SLEEP",
219 			"BUFON_FW",
220 			"ML_UP",
221 			"SU_STANDBY",
222 			"FAST_SLEEP",
223 			"DEEP_SLEEP",
224 			"BUF_ON",
225 			"TG_ON"
226 		};
227 		val = intel_de_read(dev_priv,
228 				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
229 		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
230 		if (status_val < ARRAY_SIZE(live_status))
231 			status = live_status[status_val];
232 	} else {
233 		static const char * const live_status[] = {
234 			"IDLE",
235 			"SRDONACK",
236 			"SRDENT",
237 			"BUFOFF",
238 			"BUFON",
239 			"AUXACK",
240 			"SRDOFFACK",
241 			"SRDENT_ON",
242 		};
243 		val = intel_de_read(dev_priv,
244 				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
245 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
246 			      EDP_PSR_STATUS_STATE_SHIFT;
247 		if (status_val < ARRAY_SIZE(live_status))
248 			status = live_status[status_val];
249 	}
250 
251 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
252 }
253 
254 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
255 {
256 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
257 	struct intel_psr *psr = &intel_dp->psr;
258 	intel_wakeref_t wakeref;
259 	const char *status;
260 	bool enabled;
261 	u32 val;
262 
263 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
264 	if (psr->sink_support)
265 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
266 	seq_puts(m, "\n");
267 
268 	if (!psr->sink_support)
269 		return 0;
270 
271 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
272 	mutex_lock(&psr->lock);
273 
274 	if (psr->enabled)
275 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
276 	else
277 		status = "disabled";
278 	seq_printf(m, "PSR mode: %s\n", status);
279 
280 	if (!psr->enabled) {
281 		seq_printf(m, "PSR sink not reliable: %s\n",
282 			   yesno(psr->sink_not_reliable));
283 
284 		goto unlock;
285 	}
286 
287 	if (psr->psr2_enabled) {
288 		val = intel_de_read(dev_priv,
289 				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
290 		enabled = val & EDP_PSR2_ENABLE;
291 	} else {
292 		val = intel_de_read(dev_priv,
293 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
294 		enabled = val & EDP_PSR_ENABLE;
295 	}
296 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
297 		   enableddisabled(enabled), val);
298 	psr_source_status(intel_dp, m);
299 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
300 		   psr->busy_frontbuffer_bits);
301 
302 	/*
303 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
304 	 */
305 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
306 		val = intel_de_read(dev_priv,
307 				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
308 		val &= EDP_PSR_PERF_CNT_MASK;
309 		seq_printf(m, "Performance counter: %u\n", val);
310 	}
311 
312 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
313 		seq_printf(m, "Last attempted entry at: %lld\n",
314 			   psr->last_entry_attempt);
315 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
316 	}
317 
318 	if (psr->psr2_enabled) {
319 		u32 su_frames_val[3];
320 		int frame;
321 
322 		/*
323 		 * Reading all 3 registers before hand to minimize crossing a
324 		 * frame boundary between register reads
325 		 */
326 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
327 			val = intel_de_read(dev_priv,
328 					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
329 			su_frames_val[frame / 3] = val;
330 		}
331 
332 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
333 
334 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
335 			u32 su_blocks;
336 
337 			su_blocks = su_frames_val[frame / 3] &
338 				    PSR2_SU_STATUS_MASK(frame);
339 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
340 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
341 		}
342 
343 		seq_printf(m, "PSR2 selective fetch: %s\n",
344 			   enableddisabled(psr->psr2_sel_fetch_enabled));
345 	}
346 
347 unlock:
348 	mutex_unlock(&psr->lock);
349 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
350 
351 	return 0;
352 }
353 
354 static int i915_edp_psr_status(struct seq_file *m, void *data)
355 {
356 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
357 	struct intel_dp *intel_dp = NULL;
358 	struct intel_encoder *encoder;
359 
360 	if (!HAS_PSR(dev_priv))
361 		return -ENODEV;
362 
363 	/* Find the first EDP which supports PSR */
364 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
365 		intel_dp = enc_to_intel_dp(encoder);
366 		break;
367 	}
368 
369 	if (!intel_dp)
370 		return -ENODEV;
371 
372 	return intel_psr_status(m, intel_dp);
373 }
374 
375 static int
376 i915_edp_psr_debug_set(void *data, u64 val)
377 {
378 	struct drm_i915_private *dev_priv = data;
379 	struct intel_encoder *encoder;
380 	intel_wakeref_t wakeref;
381 	int ret = -ENODEV;
382 
383 	if (!HAS_PSR(dev_priv))
384 		return ret;
385 
386 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
387 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
388 
389 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
390 
391 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
392 
393 		// TODO: split to each transcoder's PSR debug state
394 		ret = intel_psr_debug_set(intel_dp, val);
395 
396 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
397 	}
398 
399 	return ret;
400 }
401 
402 static int
403 i915_edp_psr_debug_get(void *data, u64 *val)
404 {
405 	struct drm_i915_private *dev_priv = data;
406 	struct intel_encoder *encoder;
407 
408 	if (!HAS_PSR(dev_priv))
409 		return -ENODEV;
410 
411 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
412 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
413 
414 		// TODO: split to each transcoder's PSR debug state
415 		*val = READ_ONCE(intel_dp->psr.debug);
416 		return 0;
417 	}
418 
419 	return -ENODEV;
420 }
421 
422 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
423 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
424 			"%llu\n");
425 
426 static int i915_power_domain_info(struct seq_file *m, void *unused)
427 {
428 	struct drm_i915_private *i915 = node_to_i915(m->private);
429 
430 	intel_display_power_debug(i915, m);
431 
432 	return 0;
433 }
434 
435 static int i915_dmc_info(struct seq_file *m, void *unused)
436 {
437 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
438 	intel_wakeref_t wakeref;
439 	struct intel_dmc *dmc;
440 	i915_reg_t dc5_reg, dc6_reg = {};
441 
442 	if (!HAS_DMC(dev_priv))
443 		return -ENODEV;
444 
445 	dmc = &dev_priv->dmc;
446 
447 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
448 
449 	seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
450 	seq_printf(m, "path: %s\n", dmc->fw_path);
451 	seq_printf(m, "Pipe A fw support: %s\n",
452 		   yesno(GRAPHICS_VER(dev_priv) >= 12));
453 	seq_printf(m, "Pipe A fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEA].payload));
454 	seq_printf(m, "Pipe B fw support: %s\n", yesno(IS_ALDERLAKE_P(dev_priv)));
455 	seq_printf(m, "Pipe B fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEB].payload));
456 
457 	if (!intel_dmc_has_payload(dev_priv))
458 		goto out;
459 
460 	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
461 		   DMC_VERSION_MINOR(dmc->version));
462 
463 	if (DISPLAY_VER(dev_priv) >= 12) {
464 		if (IS_DGFX(dev_priv)) {
465 			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
466 		} else {
467 			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
468 			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
469 		}
470 
471 		/*
472 		 * NOTE: DMC_DEBUG3 is a general purpose reg.
473 		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
474 		 * reg for DC3CO debugging and validation,
475 		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
476 		 */
477 		seq_printf(m, "DC3CO count: %d\n", intel_de_read(dev_priv, IS_DGFX(dev_priv) ?
478 					DG1_DMC_DEBUG3 : TGL_DMC_DEBUG3));
479 	} else {
480 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
481 						 SKL_DMC_DC3_DC5_COUNT;
482 		if (!IS_GEMINILAKE(dev_priv) && !IS_BROXTON(dev_priv))
483 			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
484 	}
485 
486 	seq_printf(m, "DC3 -> DC5 count: %d\n",
487 		   intel_de_read(dev_priv, dc5_reg));
488 	if (dc6_reg.reg)
489 		seq_printf(m, "DC5 -> DC6 count: %d\n",
490 			   intel_de_read(dev_priv, dc6_reg));
491 
492 out:
493 	seq_printf(m, "program base: 0x%08x\n",
494 		   intel_de_read(dev_priv, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
495 	seq_printf(m, "ssp base: 0x%08x\n",
496 		   intel_de_read(dev_priv, DMC_SSP_BASE));
497 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));
498 
499 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
500 
501 	return 0;
502 }
503 
504 static void intel_seq_print_mode(struct seq_file *m, int tabs,
505 				 const struct drm_display_mode *mode)
506 {
507 	int i;
508 
509 	for (i = 0; i < tabs; i++)
510 		seq_putc(m, '\t');
511 
512 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
513 }
514 
515 static void intel_encoder_info(struct seq_file *m,
516 			       struct intel_crtc *crtc,
517 			       struct intel_encoder *encoder)
518 {
519 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
520 	struct drm_connector_list_iter conn_iter;
521 	struct drm_connector *connector;
522 
523 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
524 		   encoder->base.base.id, encoder->base.name);
525 
526 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
527 	drm_for_each_connector_iter(connector, &conn_iter) {
528 		const struct drm_connector_state *conn_state =
529 			connector->state;
530 
531 		if (conn_state->best_encoder != &encoder->base)
532 			continue;
533 
534 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
535 			   connector->base.id, connector->name);
536 	}
537 	drm_connector_list_iter_end(&conn_iter);
538 }
539 
540 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
541 {
542 	const struct drm_display_mode *mode = panel->fixed_mode;
543 
544 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
545 }
546 
547 static void intel_hdcp_info(struct seq_file *m,
548 			    struct intel_connector *intel_connector)
549 {
550 	bool hdcp_cap, hdcp2_cap;
551 
552 	if (!intel_connector->hdcp.shim) {
553 		seq_puts(m, "No Connector Support");
554 		goto out;
555 	}
556 
557 	hdcp_cap = intel_hdcp_capable(intel_connector);
558 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
559 
560 	if (hdcp_cap)
561 		seq_puts(m, "HDCP1.4 ");
562 	if (hdcp2_cap)
563 		seq_puts(m, "HDCP2.2 ");
564 
565 	if (!hdcp_cap && !hdcp2_cap)
566 		seq_puts(m, "None");
567 
568 out:
569 	seq_puts(m, "\n");
570 }
571 
572 static void intel_dp_info(struct seq_file *m,
573 			  struct intel_connector *intel_connector)
574 {
575 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
576 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
577 	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
578 
579 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
580 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
581 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
582 		intel_panel_info(m, &intel_connector->panel);
583 
584 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
585 				edid ? edid->data : NULL, &intel_dp->aux);
586 }
587 
588 static void intel_dp_mst_info(struct seq_file *m,
589 			      struct intel_connector *intel_connector)
590 {
591 	bool has_audio = intel_connector->port->has_audio;
592 
593 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
594 }
595 
596 static void intel_hdmi_info(struct seq_file *m,
597 			    struct intel_connector *intel_connector)
598 {
599 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
600 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
601 
602 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
603 }
604 
605 static void intel_lvds_info(struct seq_file *m,
606 			    struct intel_connector *intel_connector)
607 {
608 	intel_panel_info(m, &intel_connector->panel);
609 }
610 
611 static void intel_connector_info(struct seq_file *m,
612 				 struct drm_connector *connector)
613 {
614 	struct intel_connector *intel_connector = to_intel_connector(connector);
615 	const struct drm_connector_state *conn_state = connector->state;
616 	struct intel_encoder *encoder =
617 		to_intel_encoder(conn_state->best_encoder);
618 	const struct drm_display_mode *mode;
619 
620 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
621 		   connector->base.id, connector->name,
622 		   drm_get_connector_status_name(connector->status));
623 
624 	if (connector->status == connector_status_disconnected)
625 		return;
626 
627 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
628 		   connector->display_info.width_mm,
629 		   connector->display_info.height_mm);
630 	seq_printf(m, "\tsubpixel order: %s\n",
631 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
632 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
633 
634 	if (!encoder)
635 		return;
636 
637 	switch (connector->connector_type) {
638 	case DRM_MODE_CONNECTOR_DisplayPort:
639 	case DRM_MODE_CONNECTOR_eDP:
640 		if (encoder->type == INTEL_OUTPUT_DP_MST)
641 			intel_dp_mst_info(m, intel_connector);
642 		else
643 			intel_dp_info(m, intel_connector);
644 		break;
645 	case DRM_MODE_CONNECTOR_LVDS:
646 		if (encoder->type == INTEL_OUTPUT_LVDS)
647 			intel_lvds_info(m, intel_connector);
648 		break;
649 	case DRM_MODE_CONNECTOR_HDMIA:
650 		if (encoder->type == INTEL_OUTPUT_HDMI ||
651 		    encoder->type == INTEL_OUTPUT_DDI)
652 			intel_hdmi_info(m, intel_connector);
653 		break;
654 	default:
655 		break;
656 	}
657 
658 	seq_puts(m, "\tHDCP version: ");
659 	intel_hdcp_info(m, intel_connector);
660 
661 	seq_printf(m, "\tmodes:\n");
662 	list_for_each_entry(mode, &connector->modes, head)
663 		intel_seq_print_mode(m, 2, mode);
664 }
665 
666 static const char *plane_type(enum drm_plane_type type)
667 {
668 	switch (type) {
669 	case DRM_PLANE_TYPE_OVERLAY:
670 		return "OVL";
671 	case DRM_PLANE_TYPE_PRIMARY:
672 		return "PRI";
673 	case DRM_PLANE_TYPE_CURSOR:
674 		return "CUR";
675 	/*
676 	 * Deliberately omitting default: to generate compiler warnings
677 	 * when a new drm_plane_type gets added.
678 	 */
679 	}
680 
681 	return "unknown";
682 }
683 
684 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
685 {
686 	/*
687 	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
688 	 * will print them all to visualize if the values are misused
689 	 */
690 	snprintf(buf, bufsize,
691 		 "%s%s%s%s%s%s(0x%08x)",
692 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
693 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
694 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
695 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
696 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
697 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
698 		 rotation);
699 }
700 
701 static const char *plane_visibility(const struct intel_plane_state *plane_state)
702 {
703 	if (plane_state->uapi.visible)
704 		return "visible";
705 
706 	if (plane_state->planar_slave)
707 		return "planar-slave";
708 
709 	return "hidden";
710 }
711 
712 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
713 {
714 	const struct intel_plane_state *plane_state =
715 		to_intel_plane_state(plane->base.state);
716 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
717 	struct drm_rect src, dst;
718 	char rot_str[48];
719 
720 	src = drm_plane_state_src(&plane_state->uapi);
721 	dst = drm_plane_state_dest(&plane_state->uapi);
722 
723 	plane_rotation(rot_str, sizeof(rot_str),
724 		       plane_state->uapi.rotation);
725 
726 	seq_puts(m, "\t\tuapi: [FB:");
727 	if (fb)
728 		seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
729 			   &fb->format->format, fb->modifier, fb->width,
730 			   fb->height);
731 	else
732 		seq_puts(m, "0] n/a,0x0,0x0,");
733 	seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
734 		   ", rotation=%s\n", plane_visibility(plane_state),
735 		   DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);
736 
737 	if (plane_state->planar_linked_plane)
738 		seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
739 			   plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
740 			   plane_state->planar_slave ? "slave" : "master");
741 }
742 
743 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
744 {
745 	const struct intel_plane_state *plane_state =
746 		to_intel_plane_state(plane->base.state);
747 	const struct drm_framebuffer *fb = plane_state->hw.fb;
748 	char rot_str[48];
749 
750 	if (!fb)
751 		return;
752 
753 	plane_rotation(rot_str, sizeof(rot_str),
754 		       plane_state->hw.rotation);
755 
756 	seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
757 		   DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
758 		   fb->base.id, &fb->format->format,
759 		   fb->modifier, fb->width, fb->height,
760 		   yesno(plane_state->uapi.visible),
761 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
762 		   DRM_RECT_ARG(&plane_state->uapi.dst),
763 		   rot_str);
764 }
765 
766 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
767 {
768 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
769 	struct intel_plane *plane;
770 
771 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
772 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
773 			   plane->base.base.id, plane->base.name,
774 			   plane_type(plane->base.type));
775 		intel_plane_uapi_info(m, plane);
776 		intel_plane_hw_info(m, plane);
777 	}
778 }
779 
780 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
781 {
782 	const struct intel_crtc_state *crtc_state =
783 		to_intel_crtc_state(crtc->base.state);
784 	int num_scalers = crtc->num_scalers;
785 	int i;
786 
787 	/* Not all platformas have a scaler */
788 	if (num_scalers) {
789 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
790 			   num_scalers,
791 			   crtc_state->scaler_state.scaler_users,
792 			   crtc_state->scaler_state.scaler_id);
793 
794 		for (i = 0; i < num_scalers; i++) {
795 			const struct intel_scaler *sc =
796 				&crtc_state->scaler_state.scalers[i];
797 
798 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
799 				   i, yesno(sc->in_use), sc->mode);
800 		}
801 		seq_puts(m, "\n");
802 	} else {
803 		seq_puts(m, "\tNo scalers available on this platform\n");
804 	}
805 }
806 
807 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
808 static void crtc_updates_info(struct seq_file *m,
809 			      struct intel_crtc *crtc,
810 			      const char *hdr)
811 {
812 	u64 count;
813 	int row;
814 
815 	count = 0;
816 	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
817 		count += crtc->debug.vbl.times[row];
818 	seq_printf(m, "%sUpdates: %llu\n", hdr, count);
819 	if (!count)
820 		return;
821 
822 	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
823 		char columns[80] = "       |";
824 		unsigned int x;
825 
826 		if (row & 1) {
827 			const char *units;
828 
829 			if (row > 10) {
830 				x = 1000000;
831 				units = "ms";
832 			} else {
833 				x = 1000;
834 				units = "us";
835 			}
836 
837 			snprintf(columns, sizeof(columns), "%4ld%s |",
838 				 DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
839 		}
840 
841 		if (crtc->debug.vbl.times[row]) {
842 			x = ilog2(crtc->debug.vbl.times[row]);
843 			memset(columns + 8, '*', x);
844 			columns[8 + x] = '\0';
845 		}
846 
847 		seq_printf(m, "%s%s\n", hdr, columns);
848 	}
849 
850 	seq_printf(m, "%sMin update: %lluns\n",
851 		   hdr, crtc->debug.vbl.min);
852 	seq_printf(m, "%sMax update: %lluns\n",
853 		   hdr, crtc->debug.vbl.max);
854 	seq_printf(m, "%sAverage update: %lluns\n",
855 		   hdr, div64_u64(crtc->debug.vbl.sum,  count));
856 	seq_printf(m, "%sOverruns > %uus: %u\n",
857 		   hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
858 }
859 
860 static int crtc_updates_show(struct seq_file *m, void *data)
861 {
862 	crtc_updates_info(m, m->private, "");
863 	return 0;
864 }
865 
866 static int crtc_updates_open(struct inode *inode, struct file *file)
867 {
868 	return single_open(file, crtc_updates_show, inode->i_private);
869 }
870 
871 static ssize_t crtc_updates_write(struct file *file,
872 				  const char __user *ubuf,
873 				  size_t len, loff_t *offp)
874 {
875 	struct seq_file *m = file->private_data;
876 	struct intel_crtc *crtc = m->private;
877 
878 	/* May race with an update. Meh. */
879 	memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
880 
881 	return len;
882 }
883 
884 static const struct file_operations crtc_updates_fops = {
885 	.owner = THIS_MODULE,
886 	.open = crtc_updates_open,
887 	.read = seq_read,
888 	.llseek = seq_lseek,
889 	.release = single_release,
890 	.write = crtc_updates_write
891 };
892 
893 static void crtc_updates_add(struct drm_crtc *crtc)
894 {
895 	debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
896 			    to_intel_crtc(crtc), &crtc_updates_fops);
897 }
898 
899 #else
900 static void crtc_updates_info(struct seq_file *m,
901 			      struct intel_crtc *crtc,
902 			      const char *hdr)
903 {
904 }
905 
906 static void crtc_updates_add(struct drm_crtc *crtc)
907 {
908 }
909 #endif
910 
911 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
912 {
913 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
914 	const struct intel_crtc_state *crtc_state =
915 		to_intel_crtc_state(crtc->base.state);
916 	struct intel_encoder *encoder;
917 
918 	seq_printf(m, "[CRTC:%d:%s]:\n",
919 		   crtc->base.base.id, crtc->base.name);
920 
921 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
922 		   yesno(crtc_state->uapi.enable),
923 		   yesno(crtc_state->uapi.active),
924 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
925 
926 	seq_printf(m, "\thw: enable=%s, active=%s\n",
927 		   yesno(crtc_state->hw.enable), yesno(crtc_state->hw.active));
928 	seq_printf(m, "\tadjusted_mode=" DRM_MODE_FMT "\n",
929 		   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
930 	seq_printf(m, "\tpipe__mode=" DRM_MODE_FMT "\n",
931 		   DRM_MODE_ARG(&crtc_state->hw.pipe_mode));
932 
933 	seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
934 		   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
935 		   yesno(crtc_state->dither), crtc_state->pipe_bpp);
936 
937 	intel_scaler_info(m, crtc);
938 
939 	if (crtc_state->bigjoiner)
940 		seq_printf(m, "\tLinked to 0x%x pipes as a %s\n",
941 			   crtc_state->bigjoiner_pipes,
942 			   intel_crtc_is_bigjoiner_slave(crtc_state) ? "slave" : "master");
943 
944 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
945 				    crtc_state->uapi.encoder_mask)
946 		intel_encoder_info(m, crtc, encoder);
947 
948 	intel_plane_info(m, crtc);
949 
950 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
951 		   yesno(!crtc->cpu_fifo_underrun_disabled),
952 		   yesno(!crtc->pch_fifo_underrun_disabled));
953 
954 	crtc_updates_info(m, crtc, "\t");
955 }
956 
957 static int i915_display_info(struct seq_file *m, void *unused)
958 {
959 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
960 	struct drm_device *dev = &dev_priv->drm;
961 	struct intel_crtc *crtc;
962 	struct drm_connector *connector;
963 	struct drm_connector_list_iter conn_iter;
964 	intel_wakeref_t wakeref;
965 
966 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
967 
968 	drm_modeset_lock_all(dev);
969 
970 	seq_printf(m, "CRTC info\n");
971 	seq_printf(m, "---------\n");
972 	for_each_intel_crtc(dev, crtc)
973 		intel_crtc_info(m, crtc);
974 
975 	seq_printf(m, "\n");
976 	seq_printf(m, "Connector info\n");
977 	seq_printf(m, "--------------\n");
978 	drm_connector_list_iter_begin(dev, &conn_iter);
979 	drm_for_each_connector_iter(connector, &conn_iter)
980 		intel_connector_info(m, connector);
981 	drm_connector_list_iter_end(&conn_iter);
982 
983 	drm_modeset_unlock_all(dev);
984 
985 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
986 
987 	return 0;
988 }
989 
990 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
991 {
992 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
993 	struct drm_device *dev = &dev_priv->drm;
994 	int i;
995 
996 	drm_modeset_lock_all(dev);
997 
998 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
999 		   dev_priv->dpll.ref_clks.nssc,
1000 		   dev_priv->dpll.ref_clks.ssc);
1001 
1002 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
1003 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
1004 
1005 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
1006 			   pll->info->id);
1007 		seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
1008 			   pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
1009 		seq_printf(m, " tracked hardware state:\n");
1010 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
1011 		seq_printf(m, " dpll_md: 0x%08x\n",
1012 			   pll->state.hw_state.dpll_md);
1013 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
1014 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
1015 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
1016 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
1017 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
1018 		seq_printf(m, " div0:    0x%08x\n", pll->state.hw_state.div0);
1019 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
1020 			   pll->state.hw_state.mg_refclkin_ctl);
1021 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
1022 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
1023 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
1024 			   pll->state.hw_state.mg_clktop2_hsclkctl);
1025 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
1026 			   pll->state.hw_state.mg_pll_div0);
1027 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
1028 			   pll->state.hw_state.mg_pll_div1);
1029 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
1030 			   pll->state.hw_state.mg_pll_lf);
1031 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
1032 			   pll->state.hw_state.mg_pll_frac_lock);
1033 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
1034 			   pll->state.hw_state.mg_pll_ssc);
1035 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
1036 			   pll->state.hw_state.mg_pll_bias);
1037 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
1038 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
1039 	}
1040 	drm_modeset_unlock_all(dev);
1041 
1042 	return 0;
1043 }
1044 
1045 static int i915_ipc_status_show(struct seq_file *m, void *data)
1046 {
1047 	struct drm_i915_private *dev_priv = m->private;
1048 
1049 	seq_printf(m, "Isochronous Priority Control: %s\n",
1050 			yesno(dev_priv->ipc_enabled));
1051 	return 0;
1052 }
1053 
1054 static int i915_ipc_status_open(struct inode *inode, struct file *file)
1055 {
1056 	struct drm_i915_private *dev_priv = inode->i_private;
1057 
1058 	if (!HAS_IPC(dev_priv))
1059 		return -ENODEV;
1060 
1061 	return single_open(file, i915_ipc_status_show, dev_priv);
1062 }
1063 
1064 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
1065 				     size_t len, loff_t *offp)
1066 {
1067 	struct seq_file *m = file->private_data;
1068 	struct drm_i915_private *dev_priv = m->private;
1069 	intel_wakeref_t wakeref;
1070 	bool enable;
1071 	int ret;
1072 
1073 	ret = kstrtobool_from_user(ubuf, len, &enable);
1074 	if (ret < 0)
1075 		return ret;
1076 
1077 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1078 		if (!dev_priv->ipc_enabled && enable)
1079 			drm_info(&dev_priv->drm,
1080 				 "Enabling IPC: WM will be proper only after next commit\n");
1081 		dev_priv->ipc_enabled = enable;
1082 		intel_enable_ipc(dev_priv);
1083 	}
1084 
1085 	return len;
1086 }
1087 
1088 static const struct file_operations i915_ipc_status_fops = {
1089 	.owner = THIS_MODULE,
1090 	.open = i915_ipc_status_open,
1091 	.read = seq_read,
1092 	.llseek = seq_lseek,
1093 	.release = single_release,
1094 	.write = i915_ipc_status_write
1095 };
1096 
1097 static int i915_ddb_info(struct seq_file *m, void *unused)
1098 {
1099 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1100 	struct drm_device *dev = &dev_priv->drm;
1101 	struct skl_ddb_entry *entry;
1102 	struct intel_crtc *crtc;
1103 
1104 	if (DISPLAY_VER(dev_priv) < 9)
1105 		return -ENODEV;
1106 
1107 	drm_modeset_lock_all(dev);
1108 
1109 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1110 
1111 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1112 		struct intel_crtc_state *crtc_state =
1113 			to_intel_crtc_state(crtc->base.state);
1114 		enum pipe pipe = crtc->pipe;
1115 		enum plane_id plane_id;
1116 
1117 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1118 
1119 		for_each_plane_id_on_crtc(crtc, plane_id) {
1120 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1121 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1122 				   entry->start, entry->end,
1123 				   skl_ddb_entry_size(entry));
1124 		}
1125 
1126 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1127 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1128 			   entry->end, skl_ddb_entry_size(entry));
1129 	}
1130 
1131 	drm_modeset_unlock_all(dev);
1132 
1133 	return 0;
1134 }
1135 
1136 static void drrs_status_per_crtc(struct seq_file *m,
1137 				 struct drm_device *dev,
1138 				 struct intel_crtc *crtc)
1139 {
1140 	struct drm_i915_private *dev_priv = to_i915(dev);
1141 	struct i915_drrs *drrs = &dev_priv->drrs;
1142 	int vrefresh = 0;
1143 	struct drm_connector *connector;
1144 	struct drm_connector_list_iter conn_iter;
1145 
1146 	drm_connector_list_iter_begin(dev, &conn_iter);
1147 	drm_for_each_connector_iter(connector, &conn_iter) {
1148 		bool supported = false;
1149 
1150 		if (connector->state->crtc != &crtc->base)
1151 			continue;
1152 
1153 		seq_printf(m, "%s:\n", connector->name);
1154 
1155 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
1156 		    drrs->type == SEAMLESS_DRRS_SUPPORT)
1157 			supported = true;
1158 
1159 		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
1160 	}
1161 	drm_connector_list_iter_end(&conn_iter);
1162 
1163 	seq_puts(m, "\n");
1164 
1165 	if (to_intel_crtc_state(crtc->base.state)->has_drrs) {
1166 		struct intel_panel *panel;
1167 
1168 		mutex_lock(&drrs->mutex);
1169 		/* DRRS Supported */
1170 		seq_puts(m, "\tDRRS Enabled: Yes\n");
1171 
1172 		/* disable_drrs() will make drrs->dp NULL */
1173 		if (!drrs->dp) {
1174 			seq_puts(m, "Idleness DRRS: Disabled\n");
1175 			mutex_unlock(&drrs->mutex);
1176 			return;
1177 		}
1178 
1179 		panel = &drrs->dp->attached_connector->panel;
1180 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1181 					drrs->busy_frontbuffer_bits);
1182 
1183 		seq_puts(m, "\n\t\t");
1184 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1185 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1186 			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1187 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1188 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1189 			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1190 		} else {
1191 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1192 						drrs->refresh_rate_type);
1193 			mutex_unlock(&drrs->mutex);
1194 			return;
1195 		}
1196 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1197 
1198 		seq_puts(m, "\n\t\t");
1199 		mutex_unlock(&drrs->mutex);
1200 	} else {
1201 		/* DRRS not supported. Print the VBT parameter*/
1202 		seq_puts(m, "\tDRRS Enabled : No");
1203 	}
1204 	seq_puts(m, "\n");
1205 }
1206 
1207 static int i915_drrs_status(struct seq_file *m, void *unused)
1208 {
1209 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1210 	struct drm_device *dev = &dev_priv->drm;
1211 	struct intel_crtc *crtc;
1212 	int active_crtc_cnt = 0;
1213 
1214 	drm_modeset_lock_all(dev);
1215 	for_each_intel_crtc(dev, crtc) {
1216 		if (crtc->base.state->active) {
1217 			active_crtc_cnt++;
1218 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1219 
1220 			drrs_status_per_crtc(m, dev, crtc);
1221 		}
1222 	}
1223 	drm_modeset_unlock_all(dev);
1224 
1225 	if (!active_crtc_cnt)
1226 		seq_puts(m, "No active crtc found\n");
1227 
1228 	return 0;
1229 }
1230 
1231 static bool
1232 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1233 			      enum i915_power_well_id power_well_id)
1234 {
1235 	intel_wakeref_t wakeref;
1236 	bool is_enabled;
1237 
1238 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1239 	is_enabled = intel_display_power_well_is_enabled(i915,
1240 							 power_well_id);
1241 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1242 
1243 	return is_enabled;
1244 }
1245 
1246 static int i915_lpsp_status(struct seq_file *m, void *unused)
1247 {
1248 	struct drm_i915_private *i915 = node_to_i915(m->private);
1249 	bool lpsp_enabled = false;
1250 
1251 	if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) {
1252 		lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2);
1253 	} else if (IS_DISPLAY_VER(i915, 11, 12)) {
1254 		lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3);
1255 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
1256 		lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL);
1257 	} else {
1258 		seq_puts(m, "LPSP: not supported\n");
1259 		return 0;
1260 	}
1261 
1262 	seq_printf(m, "LPSP: %s\n", enableddisabled(lpsp_enabled));
1263 
1264 	return 0;
1265 }
1266 
1267 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1268 {
1269 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1270 	struct drm_device *dev = &dev_priv->drm;
1271 	struct intel_encoder *intel_encoder;
1272 	struct intel_digital_port *dig_port;
1273 	struct drm_connector *connector;
1274 	struct drm_connector_list_iter conn_iter;
1275 
1276 	drm_connector_list_iter_begin(dev, &conn_iter);
1277 	drm_for_each_connector_iter(connector, &conn_iter) {
1278 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1279 			continue;
1280 
1281 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1282 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1283 			continue;
1284 
1285 		dig_port = enc_to_dig_port(intel_encoder);
1286 		if (!intel_dp_mst_source_support(&dig_port->dp))
1287 			continue;
1288 
1289 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1290 			   dig_port->base.base.base.id,
1291 			   dig_port->base.base.name);
1292 		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1293 	}
1294 	drm_connector_list_iter_end(&conn_iter);
1295 
1296 	return 0;
1297 }
1298 
1299 static ssize_t i915_displayport_test_active_write(struct file *file,
1300 						  const char __user *ubuf,
1301 						  size_t len, loff_t *offp)
1302 {
1303 	char *input_buffer;
1304 	int status = 0;
1305 	struct drm_device *dev;
1306 	struct drm_connector *connector;
1307 	struct drm_connector_list_iter conn_iter;
1308 	struct intel_dp *intel_dp;
1309 	int val = 0;
1310 
1311 	dev = ((struct seq_file *)file->private_data)->private;
1312 
1313 	if (len == 0)
1314 		return 0;
1315 
1316 	input_buffer = memdup_user_nul(ubuf, len);
1317 	if (IS_ERR(input_buffer))
1318 		return PTR_ERR(input_buffer);
1319 
1320 	drm_dbg(&to_i915(dev)->drm,
1321 		"Copied %d bytes from user\n", (unsigned int)len);
1322 
1323 	drm_connector_list_iter_begin(dev, &conn_iter);
1324 	drm_for_each_connector_iter(connector, &conn_iter) {
1325 		struct intel_encoder *encoder;
1326 
1327 		if (connector->connector_type !=
1328 		    DRM_MODE_CONNECTOR_DisplayPort)
1329 			continue;
1330 
1331 		encoder = to_intel_encoder(connector->encoder);
1332 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1333 			continue;
1334 
1335 		if (encoder && connector->status == connector_status_connected) {
1336 			intel_dp = enc_to_intel_dp(encoder);
1337 			status = kstrtoint(input_buffer, 10, &val);
1338 			if (status < 0)
1339 				break;
1340 			drm_dbg(&to_i915(dev)->drm,
1341 				"Got %d for test active\n", val);
1342 			/* To prevent erroneous activation of the compliance
1343 			 * testing code, only accept an actual value of 1 here
1344 			 */
1345 			if (val == 1)
1346 				intel_dp->compliance.test_active = true;
1347 			else
1348 				intel_dp->compliance.test_active = false;
1349 		}
1350 	}
1351 	drm_connector_list_iter_end(&conn_iter);
1352 	kfree(input_buffer);
1353 	if (status < 0)
1354 		return status;
1355 
1356 	*offp += len;
1357 	return len;
1358 }
1359 
1360 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1361 {
1362 	struct drm_i915_private *dev_priv = m->private;
1363 	struct drm_device *dev = &dev_priv->drm;
1364 	struct drm_connector *connector;
1365 	struct drm_connector_list_iter conn_iter;
1366 	struct intel_dp *intel_dp;
1367 
1368 	drm_connector_list_iter_begin(dev, &conn_iter);
1369 	drm_for_each_connector_iter(connector, &conn_iter) {
1370 		struct intel_encoder *encoder;
1371 
1372 		if (connector->connector_type !=
1373 		    DRM_MODE_CONNECTOR_DisplayPort)
1374 			continue;
1375 
1376 		encoder = to_intel_encoder(connector->encoder);
1377 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1378 			continue;
1379 
1380 		if (encoder && connector->status == connector_status_connected) {
1381 			intel_dp = enc_to_intel_dp(encoder);
1382 			if (intel_dp->compliance.test_active)
1383 				seq_puts(m, "1");
1384 			else
1385 				seq_puts(m, "0");
1386 		} else
1387 			seq_puts(m, "0");
1388 	}
1389 	drm_connector_list_iter_end(&conn_iter);
1390 
1391 	return 0;
1392 }
1393 
1394 static int i915_displayport_test_active_open(struct inode *inode,
1395 					     struct file *file)
1396 {
1397 	return single_open(file, i915_displayport_test_active_show,
1398 			   inode->i_private);
1399 }
1400 
1401 static const struct file_operations i915_displayport_test_active_fops = {
1402 	.owner = THIS_MODULE,
1403 	.open = i915_displayport_test_active_open,
1404 	.read = seq_read,
1405 	.llseek = seq_lseek,
1406 	.release = single_release,
1407 	.write = i915_displayport_test_active_write
1408 };
1409 
1410 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1411 {
1412 	struct drm_i915_private *dev_priv = m->private;
1413 	struct drm_device *dev = &dev_priv->drm;
1414 	struct drm_connector *connector;
1415 	struct drm_connector_list_iter conn_iter;
1416 	struct intel_dp *intel_dp;
1417 
1418 	drm_connector_list_iter_begin(dev, &conn_iter);
1419 	drm_for_each_connector_iter(connector, &conn_iter) {
1420 		struct intel_encoder *encoder;
1421 
1422 		if (connector->connector_type !=
1423 		    DRM_MODE_CONNECTOR_DisplayPort)
1424 			continue;
1425 
1426 		encoder = to_intel_encoder(connector->encoder);
1427 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1428 			continue;
1429 
1430 		if (encoder && connector->status == connector_status_connected) {
1431 			intel_dp = enc_to_intel_dp(encoder);
1432 			if (intel_dp->compliance.test_type ==
1433 			    DP_TEST_LINK_EDID_READ)
1434 				seq_printf(m, "%lx",
1435 					   intel_dp->compliance.test_data.edid);
1436 			else if (intel_dp->compliance.test_type ==
1437 				 DP_TEST_LINK_VIDEO_PATTERN) {
1438 				seq_printf(m, "hdisplay: %d\n",
1439 					   intel_dp->compliance.test_data.hdisplay);
1440 				seq_printf(m, "vdisplay: %d\n",
1441 					   intel_dp->compliance.test_data.vdisplay);
1442 				seq_printf(m, "bpc: %u\n",
1443 					   intel_dp->compliance.test_data.bpc);
1444 			} else if (intel_dp->compliance.test_type ==
1445 				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1446 				seq_printf(m, "pattern: %d\n",
1447 					   intel_dp->compliance.test_data.phytest.phy_pattern);
1448 				seq_printf(m, "Number of lanes: %d\n",
1449 					   intel_dp->compliance.test_data.phytest.num_lanes);
1450 				seq_printf(m, "Link Rate: %d\n",
1451 					   intel_dp->compliance.test_data.phytest.link_rate);
1452 				seq_printf(m, "level: %02x\n",
1453 					   intel_dp->train_set[0]);
1454 			}
1455 		} else
1456 			seq_puts(m, "0");
1457 	}
1458 	drm_connector_list_iter_end(&conn_iter);
1459 
1460 	return 0;
1461 }
1462 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1463 
1464 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1465 {
1466 	struct drm_i915_private *dev_priv = m->private;
1467 	struct drm_device *dev = &dev_priv->drm;
1468 	struct drm_connector *connector;
1469 	struct drm_connector_list_iter conn_iter;
1470 	struct intel_dp *intel_dp;
1471 
1472 	drm_connector_list_iter_begin(dev, &conn_iter);
1473 	drm_for_each_connector_iter(connector, &conn_iter) {
1474 		struct intel_encoder *encoder;
1475 
1476 		if (connector->connector_type !=
1477 		    DRM_MODE_CONNECTOR_DisplayPort)
1478 			continue;
1479 
1480 		encoder = to_intel_encoder(connector->encoder);
1481 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1482 			continue;
1483 
1484 		if (encoder && connector->status == connector_status_connected) {
1485 			intel_dp = enc_to_intel_dp(encoder);
1486 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1487 		} else
1488 			seq_puts(m, "0");
1489 	}
1490 	drm_connector_list_iter_end(&conn_iter);
1491 
1492 	return 0;
1493 }
1494 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1495 
1496 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1497 {
1498 	struct drm_i915_private *dev_priv = m->private;
1499 	struct drm_device *dev = &dev_priv->drm;
1500 	int level;
1501 	int num_levels;
1502 
1503 	if (IS_CHERRYVIEW(dev_priv))
1504 		num_levels = 3;
1505 	else if (IS_VALLEYVIEW(dev_priv))
1506 		num_levels = 1;
1507 	else if (IS_G4X(dev_priv))
1508 		num_levels = 3;
1509 	else
1510 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1511 
1512 	drm_modeset_lock_all(dev);
1513 
1514 	for (level = 0; level < num_levels; level++) {
1515 		unsigned int latency = wm[level];
1516 
1517 		/*
1518 		 * - WM1+ latency values in 0.5us units
1519 		 * - latencies are in us on gen9/vlv/chv
1520 		 */
1521 		if (DISPLAY_VER(dev_priv) >= 9 ||
1522 		    IS_VALLEYVIEW(dev_priv) ||
1523 		    IS_CHERRYVIEW(dev_priv) ||
1524 		    IS_G4X(dev_priv))
1525 			latency *= 10;
1526 		else if (level > 0)
1527 			latency *= 5;
1528 
1529 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1530 			   level, wm[level], latency / 10, latency % 10);
1531 	}
1532 
1533 	drm_modeset_unlock_all(dev);
1534 }
1535 
1536 static int pri_wm_latency_show(struct seq_file *m, void *data)
1537 {
1538 	struct drm_i915_private *dev_priv = m->private;
1539 	const u16 *latencies;
1540 
1541 	if (DISPLAY_VER(dev_priv) >= 9)
1542 		latencies = dev_priv->wm.skl_latency;
1543 	else
1544 		latencies = dev_priv->wm.pri_latency;
1545 
1546 	wm_latency_show(m, latencies);
1547 
1548 	return 0;
1549 }
1550 
1551 static int spr_wm_latency_show(struct seq_file *m, void *data)
1552 {
1553 	struct drm_i915_private *dev_priv = m->private;
1554 	const u16 *latencies;
1555 
1556 	if (DISPLAY_VER(dev_priv) >= 9)
1557 		latencies = dev_priv->wm.skl_latency;
1558 	else
1559 		latencies = dev_priv->wm.spr_latency;
1560 
1561 	wm_latency_show(m, latencies);
1562 
1563 	return 0;
1564 }
1565 
1566 static int cur_wm_latency_show(struct seq_file *m, void *data)
1567 {
1568 	struct drm_i915_private *dev_priv = m->private;
1569 	const u16 *latencies;
1570 
1571 	if (DISPLAY_VER(dev_priv) >= 9)
1572 		latencies = dev_priv->wm.skl_latency;
1573 	else
1574 		latencies = dev_priv->wm.cur_latency;
1575 
1576 	wm_latency_show(m, latencies);
1577 
1578 	return 0;
1579 }
1580 
1581 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1582 {
1583 	struct drm_i915_private *dev_priv = inode->i_private;
1584 
1585 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
1586 		return -ENODEV;
1587 
1588 	return single_open(file, pri_wm_latency_show, dev_priv);
1589 }
1590 
1591 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1592 {
1593 	struct drm_i915_private *dev_priv = inode->i_private;
1594 
1595 	if (HAS_GMCH(dev_priv))
1596 		return -ENODEV;
1597 
1598 	return single_open(file, spr_wm_latency_show, dev_priv);
1599 }
1600 
1601 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1602 {
1603 	struct drm_i915_private *dev_priv = inode->i_private;
1604 
1605 	if (HAS_GMCH(dev_priv))
1606 		return -ENODEV;
1607 
1608 	return single_open(file, cur_wm_latency_show, dev_priv);
1609 }
1610 
1611 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1612 				size_t len, loff_t *offp, u16 wm[8])
1613 {
1614 	struct seq_file *m = file->private_data;
1615 	struct drm_i915_private *dev_priv = m->private;
1616 	struct drm_device *dev = &dev_priv->drm;
1617 	u16 new[8] = { 0 };
1618 	int num_levels;
1619 	int level;
1620 	int ret;
1621 	char tmp[32];
1622 
1623 	if (IS_CHERRYVIEW(dev_priv))
1624 		num_levels = 3;
1625 	else if (IS_VALLEYVIEW(dev_priv))
1626 		num_levels = 1;
1627 	else if (IS_G4X(dev_priv))
1628 		num_levels = 3;
1629 	else
1630 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1631 
1632 	if (len >= sizeof(tmp))
1633 		return -EINVAL;
1634 
1635 	if (copy_from_user(tmp, ubuf, len))
1636 		return -EFAULT;
1637 
1638 	tmp[len] = '\0';
1639 
1640 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1641 		     &new[0], &new[1], &new[2], &new[3],
1642 		     &new[4], &new[5], &new[6], &new[7]);
1643 	if (ret != num_levels)
1644 		return -EINVAL;
1645 
1646 	drm_modeset_lock_all(dev);
1647 
1648 	for (level = 0; level < num_levels; level++)
1649 		wm[level] = new[level];
1650 
1651 	drm_modeset_unlock_all(dev);
1652 
1653 	return len;
1654 }
1655 
1656 
1657 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1658 				    size_t len, loff_t *offp)
1659 {
1660 	struct seq_file *m = file->private_data;
1661 	struct drm_i915_private *dev_priv = m->private;
1662 	u16 *latencies;
1663 
1664 	if (DISPLAY_VER(dev_priv) >= 9)
1665 		latencies = dev_priv->wm.skl_latency;
1666 	else
1667 		latencies = dev_priv->wm.pri_latency;
1668 
1669 	return wm_latency_write(file, ubuf, len, offp, latencies);
1670 }
1671 
1672 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1673 				    size_t len, loff_t *offp)
1674 {
1675 	struct seq_file *m = file->private_data;
1676 	struct drm_i915_private *dev_priv = m->private;
1677 	u16 *latencies;
1678 
1679 	if (DISPLAY_VER(dev_priv) >= 9)
1680 		latencies = dev_priv->wm.skl_latency;
1681 	else
1682 		latencies = dev_priv->wm.spr_latency;
1683 
1684 	return wm_latency_write(file, ubuf, len, offp, latencies);
1685 }
1686 
1687 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1688 				    size_t len, loff_t *offp)
1689 {
1690 	struct seq_file *m = file->private_data;
1691 	struct drm_i915_private *dev_priv = m->private;
1692 	u16 *latencies;
1693 
1694 	if (DISPLAY_VER(dev_priv) >= 9)
1695 		latencies = dev_priv->wm.skl_latency;
1696 	else
1697 		latencies = dev_priv->wm.cur_latency;
1698 
1699 	return wm_latency_write(file, ubuf, len, offp, latencies);
1700 }
1701 
1702 static const struct file_operations i915_pri_wm_latency_fops = {
1703 	.owner = THIS_MODULE,
1704 	.open = pri_wm_latency_open,
1705 	.read = seq_read,
1706 	.llseek = seq_lseek,
1707 	.release = single_release,
1708 	.write = pri_wm_latency_write
1709 };
1710 
1711 static const struct file_operations i915_spr_wm_latency_fops = {
1712 	.owner = THIS_MODULE,
1713 	.open = spr_wm_latency_open,
1714 	.read = seq_read,
1715 	.llseek = seq_lseek,
1716 	.release = single_release,
1717 	.write = spr_wm_latency_write
1718 };
1719 
1720 static const struct file_operations i915_cur_wm_latency_fops = {
1721 	.owner = THIS_MODULE,
1722 	.open = cur_wm_latency_open,
1723 	.read = seq_read,
1724 	.llseek = seq_lseek,
1725 	.release = single_release,
1726 	.write = cur_wm_latency_write
1727 };
1728 
1729 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1730 {
1731 	struct drm_i915_private *dev_priv = m->private;
1732 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1733 
1734 	/* Synchronize with everything first in case there's been an HPD
1735 	 * storm, but we haven't finished handling it in the kernel yet
1736 	 */
1737 	intel_synchronize_irq(dev_priv);
1738 	flush_work(&dev_priv->hotplug.dig_port_work);
1739 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1740 
1741 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1742 	seq_printf(m, "Detected: %s\n",
1743 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1744 
1745 	return 0;
1746 }
1747 
1748 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1749 					const char __user *ubuf, size_t len,
1750 					loff_t *offp)
1751 {
1752 	struct seq_file *m = file->private_data;
1753 	struct drm_i915_private *dev_priv = m->private;
1754 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1755 	unsigned int new_threshold;
1756 	int i;
1757 	char *newline;
1758 	char tmp[16];
1759 
1760 	if (len >= sizeof(tmp))
1761 		return -EINVAL;
1762 
1763 	if (copy_from_user(tmp, ubuf, len))
1764 		return -EFAULT;
1765 
1766 	tmp[len] = '\0';
1767 
1768 	/* Strip newline, if any */
1769 	newline = strchr(tmp, '\n');
1770 	if (newline)
1771 		*newline = '\0';
1772 
1773 	if (strcmp(tmp, "reset") == 0)
1774 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1775 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1776 		return -EINVAL;
1777 
1778 	if (new_threshold > 0)
1779 		drm_dbg_kms(&dev_priv->drm,
1780 			    "Setting HPD storm detection threshold to %d\n",
1781 			    new_threshold);
1782 	else
1783 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1784 
1785 	spin_lock_irq(&dev_priv->irq_lock);
1786 	hotplug->hpd_storm_threshold = new_threshold;
1787 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1788 	for_each_hpd_pin(i)
1789 		hotplug->stats[i].count = 0;
1790 	spin_unlock_irq(&dev_priv->irq_lock);
1791 
1792 	/* Re-enable hpd immediately if we were in an irq storm */
1793 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1794 
1795 	return len;
1796 }
1797 
1798 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1799 {
1800 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1801 }
1802 
1803 static const struct file_operations i915_hpd_storm_ctl_fops = {
1804 	.owner = THIS_MODULE,
1805 	.open = i915_hpd_storm_ctl_open,
1806 	.read = seq_read,
1807 	.llseek = seq_lseek,
1808 	.release = single_release,
1809 	.write = i915_hpd_storm_ctl_write
1810 };
1811 
1812 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1813 {
1814 	struct drm_i915_private *dev_priv = m->private;
1815 
1816 	seq_printf(m, "Enabled: %s\n",
1817 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1818 
1819 	return 0;
1820 }
1821 
1822 static int
1823 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1824 {
1825 	return single_open(file, i915_hpd_short_storm_ctl_show,
1826 			   inode->i_private);
1827 }
1828 
1829 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1830 					      const char __user *ubuf,
1831 					      size_t len, loff_t *offp)
1832 {
1833 	struct seq_file *m = file->private_data;
1834 	struct drm_i915_private *dev_priv = m->private;
1835 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1836 	char *newline;
1837 	char tmp[16];
1838 	int i;
1839 	bool new_state;
1840 
1841 	if (len >= sizeof(tmp))
1842 		return -EINVAL;
1843 
1844 	if (copy_from_user(tmp, ubuf, len))
1845 		return -EFAULT;
1846 
1847 	tmp[len] = '\0';
1848 
1849 	/* Strip newline, if any */
1850 	newline = strchr(tmp, '\n');
1851 	if (newline)
1852 		*newline = '\0';
1853 
1854 	/* Reset to the "default" state for this system */
1855 	if (strcmp(tmp, "reset") == 0)
1856 		new_state = !HAS_DP_MST(dev_priv);
1857 	else if (kstrtobool(tmp, &new_state) != 0)
1858 		return -EINVAL;
1859 
1860 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1861 		    new_state ? "En" : "Dis");
1862 
1863 	spin_lock_irq(&dev_priv->irq_lock);
1864 	hotplug->hpd_short_storm_enabled = new_state;
1865 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1866 	for_each_hpd_pin(i)
1867 		hotplug->stats[i].count = 0;
1868 	spin_unlock_irq(&dev_priv->irq_lock);
1869 
1870 	/* Re-enable hpd immediately if we were in an irq storm */
1871 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1872 
1873 	return len;
1874 }
1875 
1876 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1877 	.owner = THIS_MODULE,
1878 	.open = i915_hpd_short_storm_ctl_open,
1879 	.read = seq_read,
1880 	.llseek = seq_lseek,
1881 	.release = single_release,
1882 	.write = i915_hpd_short_storm_ctl_write,
1883 };
1884 
1885 static int i915_drrs_ctl_set(void *data, u64 val)
1886 {
1887 	struct drm_i915_private *dev_priv = data;
1888 	struct drm_device *dev = &dev_priv->drm;
1889 	struct intel_crtc *crtc;
1890 
1891 	if (DISPLAY_VER(dev_priv) < 7)
1892 		return -ENODEV;
1893 
1894 	for_each_intel_crtc(dev, crtc) {
1895 		struct drm_connector_list_iter conn_iter;
1896 		struct intel_crtc_state *crtc_state;
1897 		struct drm_connector *connector;
1898 		struct drm_crtc_commit *commit;
1899 		int ret;
1900 
1901 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
1902 		if (ret)
1903 			return ret;
1904 
1905 		crtc_state = to_intel_crtc_state(crtc->base.state);
1906 
1907 		if (!crtc_state->hw.active ||
1908 		    !crtc_state->has_drrs)
1909 			goto out;
1910 
1911 		commit = crtc_state->uapi.commit;
1912 		if (commit) {
1913 			ret = wait_for_completion_interruptible(&commit->hw_done);
1914 			if (ret)
1915 				goto out;
1916 		}
1917 
1918 		drm_connector_list_iter_begin(dev, &conn_iter);
1919 		drm_for_each_connector_iter(connector, &conn_iter) {
1920 			struct intel_encoder *encoder;
1921 			struct intel_dp *intel_dp;
1922 
1923 			if (!(crtc_state->uapi.connector_mask &
1924 			      drm_connector_mask(connector)))
1925 				continue;
1926 
1927 			encoder = intel_attached_encoder(to_intel_connector(connector));
1928 			if (encoder->type != INTEL_OUTPUT_EDP)
1929 				continue;
1930 
1931 			drm_dbg(&dev_priv->drm,
1932 				"Manually %sabling DRRS. %llu\n",
1933 				val ? "en" : "dis", val);
1934 
1935 			intel_dp = enc_to_intel_dp(encoder);
1936 			if (val)
1937 				intel_drrs_enable(intel_dp, crtc_state);
1938 			else
1939 				intel_drrs_disable(intel_dp, crtc_state);
1940 		}
1941 		drm_connector_list_iter_end(&conn_iter);
1942 
1943 out:
1944 		drm_modeset_unlock(&crtc->base.mutex);
1945 		if (ret)
1946 			return ret;
1947 	}
1948 
1949 	return 0;
1950 }
1951 
1952 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
1953 
1954 static ssize_t
1955 i915_fifo_underrun_reset_write(struct file *filp,
1956 			       const char __user *ubuf,
1957 			       size_t cnt, loff_t *ppos)
1958 {
1959 	struct drm_i915_private *dev_priv = filp->private_data;
1960 	struct intel_crtc *crtc;
1961 	struct drm_device *dev = &dev_priv->drm;
1962 	int ret;
1963 	bool reset;
1964 
1965 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
1966 	if (ret)
1967 		return ret;
1968 
1969 	if (!reset)
1970 		return cnt;
1971 
1972 	for_each_intel_crtc(dev, crtc) {
1973 		struct drm_crtc_commit *commit;
1974 		struct intel_crtc_state *crtc_state;
1975 
1976 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
1977 		if (ret)
1978 			return ret;
1979 
1980 		crtc_state = to_intel_crtc_state(crtc->base.state);
1981 		commit = crtc_state->uapi.commit;
1982 		if (commit) {
1983 			ret = wait_for_completion_interruptible(&commit->hw_done);
1984 			if (!ret)
1985 				ret = wait_for_completion_interruptible(&commit->flip_done);
1986 		}
1987 
1988 		if (!ret && crtc_state->hw.active) {
1989 			drm_dbg_kms(&dev_priv->drm,
1990 				    "Re-arming FIFO underruns on pipe %c\n",
1991 				    pipe_name(crtc->pipe));
1992 
1993 			intel_crtc_arm_fifo_underrun(crtc, crtc_state);
1994 		}
1995 
1996 		drm_modeset_unlock(&crtc->base.mutex);
1997 
1998 		if (ret)
1999 			return ret;
2000 	}
2001 
2002 	intel_fbc_reset_underrun(dev_priv);
2003 
2004 	return cnt;
2005 }
2006 
2007 static const struct file_operations i915_fifo_underrun_reset_ops = {
2008 	.owner = THIS_MODULE,
2009 	.open = simple_open,
2010 	.write = i915_fifo_underrun_reset_write,
2011 	.llseek = default_llseek,
2012 };
2013 
2014 static const struct drm_info_list intel_display_debugfs_list[] = {
2015 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
2016 	{"i915_ips_status", i915_ips_status, 0},
2017 	{"i915_sr_status", i915_sr_status, 0},
2018 	{"i915_opregion", i915_opregion, 0},
2019 	{"i915_vbt", i915_vbt, 0},
2020 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
2021 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
2022 	{"i915_power_domain_info", i915_power_domain_info, 0},
2023 	{"i915_dmc_info", i915_dmc_info, 0},
2024 	{"i915_display_info", i915_display_info, 0},
2025 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
2026 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
2027 	{"i915_ddb_info", i915_ddb_info, 0},
2028 	{"i915_drrs_status", i915_drrs_status, 0},
2029 	{"i915_lpsp_status", i915_lpsp_status, 0},
2030 };
2031 
2032 static const struct {
2033 	const char *name;
2034 	const struct file_operations *fops;
2035 } intel_display_debugfs_files[] = {
2036 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
2037 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
2038 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
2039 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
2040 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
2041 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
2042 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
2043 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
2044 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
2045 	{"i915_ipc_status", &i915_ipc_status_fops},
2046 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
2047 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
2048 };
2049 
2050 void intel_display_debugfs_register(struct drm_i915_private *i915)
2051 {
2052 	struct drm_minor *minor = i915->drm.primary;
2053 	int i;
2054 
2055 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
2056 		debugfs_create_file(intel_display_debugfs_files[i].name,
2057 				    S_IRUGO | S_IWUSR,
2058 				    minor->debugfs_root,
2059 				    to_i915(minor->dev),
2060 				    intel_display_debugfs_files[i].fops);
2061 	}
2062 
2063 	drm_debugfs_create_files(intel_display_debugfs_list,
2064 				 ARRAY_SIZE(intel_display_debugfs_list),
2065 				 minor->debugfs_root, minor);
2066 
2067 	intel_fbc_debugfs_register(i915);
2068 }
2069 
2070 static int i915_panel_show(struct seq_file *m, void *data)
2071 {
2072 	struct drm_connector *connector = m->private;
2073 	struct intel_dp *intel_dp =
2074 		intel_attached_dp(to_intel_connector(connector));
2075 
2076 	if (connector->status != connector_status_connected)
2077 		return -ENODEV;
2078 
2079 	seq_printf(m, "Panel power up delay: %d\n",
2080 		   intel_dp->pps.panel_power_up_delay);
2081 	seq_printf(m, "Panel power down delay: %d\n",
2082 		   intel_dp->pps.panel_power_down_delay);
2083 	seq_printf(m, "Backlight on delay: %d\n",
2084 		   intel_dp->pps.backlight_on_delay);
2085 	seq_printf(m, "Backlight off delay: %d\n",
2086 		   intel_dp->pps.backlight_off_delay);
2087 
2088 	return 0;
2089 }
2090 DEFINE_SHOW_ATTRIBUTE(i915_panel);
2091 
2092 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2093 {
2094 	struct drm_connector *connector = m->private;
2095 	struct drm_i915_private *i915 = to_i915(connector->dev);
2096 	struct intel_connector *intel_connector = to_intel_connector(connector);
2097 	int ret;
2098 
2099 	ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
2100 	if (ret)
2101 		return ret;
2102 
2103 	if (!connector->encoder || connector->status != connector_status_connected) {
2104 		ret = -ENODEV;
2105 		goto out;
2106 	}
2107 
2108 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2109 		   connector->base.id);
2110 	intel_hdcp_info(m, intel_connector);
2111 
2112 out:
2113 	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
2114 
2115 	return ret;
2116 }
2117 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2118 
2119 static int i915_psr_status_show(struct seq_file *m, void *data)
2120 {
2121 	struct drm_connector *connector = m->private;
2122 	struct intel_dp *intel_dp =
2123 		intel_attached_dp(to_intel_connector(connector));
2124 
2125 	return intel_psr_status(m, intel_dp);
2126 }
2127 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
2128 
2129 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2130 {
2131 	struct drm_connector *connector = m->private;
2132 	struct drm_i915_private *i915 = to_i915(connector->dev);
2133 	struct intel_encoder *encoder;
2134 	bool lpsp_capable = false;
2135 
2136 	encoder = intel_attached_encoder(to_intel_connector(connector));
2137 	if (!encoder)
2138 		return -ENODEV;
2139 
2140 	if (connector->status != connector_status_connected)
2141 		return -ENODEV;
2142 
2143 	if (DISPLAY_VER(i915) >= 13)
2144 		lpsp_capable = encoder->port <= PORT_B;
2145 	else if (DISPLAY_VER(i915) >= 12)
2146 		/*
2147 		 * Actually TGL can drive LPSP on port till DDI_C
2148 		 * but there is no physical connected DDI_C on TGL sku's,
2149 		 * even driver is not initilizing DDI_C port for gen12.
2150 		 */
2151 		lpsp_capable = encoder->port <= PORT_B;
2152 	else if (DISPLAY_VER(i915) == 11)
2153 		lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2154 				connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2155 	else if (IS_DISPLAY_VER(i915, 9, 10))
2156 		lpsp_capable = (encoder->port == PORT_A &&
2157 				(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2158 				 connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2159 				 connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2160 	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2161 		lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP;
2162 
2163 	seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");
2164 
2165 	return 0;
2166 }
2167 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2168 
2169 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2170 {
2171 	struct drm_connector *connector = m->private;
2172 	struct drm_device *dev = connector->dev;
2173 	struct drm_crtc *crtc;
2174 	struct intel_dp *intel_dp;
2175 	struct drm_modeset_acquire_ctx ctx;
2176 	struct intel_crtc_state *crtc_state = NULL;
2177 	int ret = 0;
2178 	bool try_again = false;
2179 
2180 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2181 
2182 	do {
2183 		try_again = false;
2184 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2185 				       &ctx);
2186 		if (ret) {
2187 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2188 				try_again = true;
2189 				continue;
2190 			}
2191 			break;
2192 		}
2193 		crtc = connector->state->crtc;
2194 		if (connector->status != connector_status_connected || !crtc) {
2195 			ret = -ENODEV;
2196 			break;
2197 		}
2198 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2199 		if (ret == -EDEADLK) {
2200 			ret = drm_modeset_backoff(&ctx);
2201 			if (!ret) {
2202 				try_again = true;
2203 				continue;
2204 			}
2205 			break;
2206 		} else if (ret) {
2207 			break;
2208 		}
2209 		intel_dp = intel_attached_dp(to_intel_connector(connector));
2210 		crtc_state = to_intel_crtc_state(crtc->state);
2211 		seq_printf(m, "DSC_Enabled: %s\n",
2212 			   yesno(crtc_state->dsc.compression_enable));
2213 		seq_printf(m, "DSC_Sink_Support: %s\n",
2214 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2215 		seq_printf(m, "Force_DSC_Enable: %s\n",
2216 			   yesno(intel_dp->force_dsc_en));
2217 		if (!intel_dp_is_edp(intel_dp))
2218 			seq_printf(m, "FEC_Sink_Support: %s\n",
2219 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2220 	} while (try_again);
2221 
2222 	drm_modeset_drop_locks(&ctx);
2223 	drm_modeset_acquire_fini(&ctx);
2224 
2225 	return ret;
2226 }
2227 
2228 static ssize_t i915_dsc_fec_support_write(struct file *file,
2229 					  const char __user *ubuf,
2230 					  size_t len, loff_t *offp)
2231 {
2232 	bool dsc_enable = false;
2233 	int ret;
2234 	struct drm_connector *connector =
2235 		((struct seq_file *)file->private_data)->private;
2236 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2237 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2238 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2239 
2240 	if (len == 0)
2241 		return 0;
2242 
2243 	drm_dbg(&i915->drm,
2244 		"Copied %zu bytes from user to force DSC\n", len);
2245 
2246 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2247 	if (ret < 0)
2248 		return ret;
2249 
2250 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2251 		(dsc_enable) ? "true" : "false");
2252 	intel_dp->force_dsc_en = dsc_enable;
2253 
2254 	*offp += len;
2255 	return len;
2256 }
2257 
2258 static int i915_dsc_fec_support_open(struct inode *inode,
2259 				     struct file *file)
2260 {
2261 	return single_open(file, i915_dsc_fec_support_show,
2262 			   inode->i_private);
2263 }
2264 
2265 static const struct file_operations i915_dsc_fec_support_fops = {
2266 	.owner = THIS_MODULE,
2267 	.open = i915_dsc_fec_support_open,
2268 	.read = seq_read,
2269 	.llseek = seq_lseek,
2270 	.release = single_release,
2271 	.write = i915_dsc_fec_support_write
2272 };
2273 
2274 static int i915_dsc_bpp_show(struct seq_file *m, void *data)
2275 {
2276 	struct drm_connector *connector = m->private;
2277 	struct drm_device *dev = connector->dev;
2278 	struct drm_crtc *crtc;
2279 	struct intel_crtc_state *crtc_state;
2280 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2281 	int ret;
2282 
2283 	if (!encoder)
2284 		return -ENODEV;
2285 
2286 	ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
2287 	if (ret)
2288 		return ret;
2289 
2290 	crtc = connector->state->crtc;
2291 	if (connector->status != connector_status_connected || !crtc) {
2292 		ret = -ENODEV;
2293 		goto out;
2294 	}
2295 
2296 	crtc_state = to_intel_crtc_state(crtc->state);
2297 	seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp);
2298 
2299 out:	drm_modeset_unlock(&dev->mode_config.connection_mutex);
2300 
2301 	return ret;
2302 }
2303 
2304 static ssize_t i915_dsc_bpp_write(struct file *file,
2305 				  const char __user *ubuf,
2306 				  size_t len, loff_t *offp)
2307 {
2308 	struct drm_connector *connector =
2309 		((struct seq_file *)file->private_data)->private;
2310 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2311 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2312 	int dsc_bpp = 0;
2313 	int ret;
2314 
2315 	ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp);
2316 	if (ret < 0)
2317 		return ret;
2318 
2319 	intel_dp->force_dsc_bpp = dsc_bpp;
2320 	*offp += len;
2321 
2322 	return len;
2323 }
2324 
2325 static int i915_dsc_bpp_open(struct inode *inode,
2326 			     struct file *file)
2327 {
2328 	return single_open(file, i915_dsc_bpp_show,
2329 			   inode->i_private);
2330 }
2331 
2332 static const struct file_operations i915_dsc_bpp_fops = {
2333 	.owner = THIS_MODULE,
2334 	.open = i915_dsc_bpp_open,
2335 	.read = seq_read,
2336 	.llseek = seq_lseek,
2337 	.release = single_release,
2338 	.write = i915_dsc_bpp_write
2339 };
2340 
2341 /**
2342  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2343  * @connector: pointer to a registered drm_connector
2344  *
2345  * Cleanup will be done by drm_connector_unregister() through a call to
2346  * drm_debugfs_connector_remove().
2347  */
2348 void intel_connector_debugfs_add(struct intel_connector *intel_connector)
2349 {
2350 	struct drm_connector *connector = &intel_connector->base;
2351 	struct dentry *root = connector->debugfs_entry;
2352 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2353 
2354 	/* The connector must have been registered beforehands. */
2355 	if (!root)
2356 		return;
2357 
2358 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2359 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
2360 				    connector, &i915_panel_fops);
2361 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
2362 				    connector, &i915_psr_sink_status_fops);
2363 	}
2364 
2365 	if (HAS_PSR(dev_priv) &&
2366 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2367 		debugfs_create_file("i915_psr_status", 0444, root,
2368 				    connector, &i915_psr_status_fops);
2369 	}
2370 
2371 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2372 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2373 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2374 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
2375 				    connector, &i915_hdcp_sink_capability_fops);
2376 	}
2377 
2378 	if (DISPLAY_VER(dev_priv) >= 11 &&
2379 	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
2380 	    !to_intel_connector(connector)->mst_port) ||
2381 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
2382 		debugfs_create_file("i915_dsc_fec_support", 0644, root,
2383 				    connector, &i915_dsc_fec_support_fops);
2384 
2385 		debugfs_create_file("i915_dsc_bpp", 0644, root,
2386 				    connector, &i915_dsc_bpp_fops);
2387 	}
2388 
2389 	if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2390 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2391 	    connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2392 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2393 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
2394 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2395 				    connector, &i915_lpsp_capability_fops);
2396 }
2397 
2398 /**
2399  * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
2400  * @crtc: pointer to a drm_crtc
2401  *
2402  * Failure to add debugfs entries should generally be ignored.
2403  */
2404 void intel_crtc_debugfs_add(struct drm_crtc *crtc)
2405 {
2406 	if (!crtc->debugfs_entry)
2407 		return;
2408 
2409 	crtc_updates_add(crtc);
2410 	intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc));
2411 }
2412