1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_de.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_display_types.h"
14 #include "intel_dmc.h"
15 #include "intel_dp.h"
16 #include "intel_dp_mst.h"
17 #include "intel_drrs.h"
18 #include "intel_fbc.h"
19 #include "intel_hdcp.h"
20 #include "intel_hdmi.h"
21 #include "intel_pm.h"
22 #include "intel_psr.h"
23 #include "intel_sprite.h"
24 
25 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
26 {
27 	return to_i915(node->minor->dev);
28 }
29 
30 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
31 {
32 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
33 
34 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
35 		   dev_priv->fb_tracking.busy_bits);
36 
37 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
38 		   dev_priv->fb_tracking.flip_bits);
39 
40 	return 0;
41 }
42 
43 static int i915_ips_status(struct seq_file *m, void *unused)
44 {
45 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
46 	intel_wakeref_t wakeref;
47 
48 	if (!HAS_IPS(dev_priv))
49 		return -ENODEV;
50 
51 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
52 
53 	seq_printf(m, "Enabled by kernel parameter: %s\n",
54 		   yesno(dev_priv->params.enable_ips));
55 
56 	if (DISPLAY_VER(dev_priv) >= 8) {
57 		seq_puts(m, "Currently: unknown\n");
58 	} else {
59 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
60 			seq_puts(m, "Currently: enabled\n");
61 		else
62 			seq_puts(m, "Currently: disabled\n");
63 	}
64 
65 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
66 
67 	return 0;
68 }
69 
/*
 * Report whether panel self-refresh (SR) is currently enabled.
 *
 * The status register and enable bit differ per platform generation,
 * hence the cascade below; the first matching platform check wins.
 * On display ver >= 9 there is no single global SR status bit to read.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	/* Keep the display powered while poking status registers. */
	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (DISPLAY_VER(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
98 
99 static int i915_opregion(struct seq_file *m, void *unused)
100 {
101 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
102 
103 	if (opregion->header)
104 		seq_write(m, opregion->header, OPREGION_SIZE);
105 
106 	return 0;
107 }
108 
109 static int i915_vbt(struct seq_file *m, void *unused)
110 {
111 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
112 
113 	if (opregion->vbt)
114 		seq_write(m, opregion->vbt, opregion->vbt_size);
115 
116 	return 0;
117 }
118 
119 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
120 {
121 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
122 	struct drm_device *dev = &dev_priv->drm;
123 	struct intel_framebuffer *fbdev_fb = NULL;
124 	struct drm_framebuffer *drm_fb;
125 
126 #ifdef CONFIG_DRM_FBDEV_EMULATION
127 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
128 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
129 
130 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
131 			   fbdev_fb->base.width,
132 			   fbdev_fb->base.height,
133 			   fbdev_fb->base.format->depth,
134 			   fbdev_fb->base.format->cpp[0] * 8,
135 			   fbdev_fb->base.modifier,
136 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
137 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
138 		seq_putc(m, '\n');
139 	}
140 #endif
141 
142 	mutex_lock(&dev->mode_config.fb_lock);
143 	drm_for_each_fb(drm_fb, dev) {
144 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
145 		if (fb == fbdev_fb)
146 			continue;
147 
148 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
149 			   fb->base.width,
150 			   fb->base.height,
151 			   fb->base.format->depth,
152 			   fb->base.format->cpp[0] * 8,
153 			   fb->base.modifier,
154 			   drm_framebuffer_read_refcount(&fb->base));
155 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
156 		seq_putc(m, '\n');
157 	}
158 	mutex_unlock(&dev->mode_config.fb_lock);
159 
160 	return 0;
161 }
162 
163 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
164 {
165 	u8 val;
166 	static const char * const sink_status[] = {
167 		"inactive",
168 		"transition to active, capture and display",
169 		"active, display from RFB",
170 		"active, capture and display on sink device timings",
171 		"transition to inactive, capture and display, timing re-sync",
172 		"reserved",
173 		"reserved",
174 		"sink internal error",
175 	};
176 	struct drm_connector *connector = m->private;
177 	struct intel_dp *intel_dp =
178 		intel_attached_dp(to_intel_connector(connector));
179 	int ret;
180 
181 	if (!CAN_PSR(intel_dp)) {
182 		seq_puts(m, "PSR Unsupported\n");
183 		return -ENODEV;
184 	}
185 
186 	if (connector->status != connector_status_connected)
187 		return -ENODEV;
188 
189 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
190 
191 	if (ret == 1) {
192 		const char *str = "unknown";
193 
194 		val &= DP_PSR_SINK_STATE_MASK;
195 		if (val < ARRAY_SIZE(sink_status))
196 			str = sink_status[val];
197 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
198 	} else {
199 		return ret;
200 	}
201 
202 	return 0;
203 }
204 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
205 
/*
 * Print the source (GPU-side) PSR state machine status, decoding the
 * live state field of the PSR1 or PSR2 status register into a name.
 */
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const char *status = "unknown";
	u32 val, status_val;

	if (intel_dp->psr.psr2_enabled) {
		/* PSR2 state names, indexed by EDP_PSR2_STATUS state field. */
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = intel_de_read(dev_priv,
				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		/* PSR1 state names, indexed by EDP_PSR_STATUS state field. */
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = intel_de_read(dev_priv,
				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	/* "unknown" is kept for out-of-range state values. */
	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
253 
/*
 * Dump the full PSR status for one DP encoder: sink capability, current
 * mode (PSR1/PSR2), hardware enable bits, frontbuffer busy bits, and
 * optional counters. Takes a runtime PM wakeref and psr->lock while
 * reading hardware state.
 */
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->sink_support)
		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	/* Nothing more to report without a PSR-capable sink. */
	if (!psr->sink_support)
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   yesno(psr->sink_not_reliable));

		goto unlock;
	}

	/* Read the enable bit from the control register actually in use. */
	if (psr->psr2_enabled) {
		val = intel_de_read(dev_priv,
				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(dev_priv,
				    EDP_PSR_CTL(intel_dp->psr.transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(intel_dp, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = intel_de_read(dev_priv,
				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
		val &= EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	/* Entry/exit timestamps are only tracked in IRQ debug mode. */
	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
			val = intel_de_read(dev_priv,
					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
			su_frames_val[frame / 3] = val;
		}

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		/* Each register packs the SU block counts of 3 frames. */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}

		seq_printf(m, "PSR2 selective fetch: %s\n",
			   enableddisabled(psr->psr2_sel_fetch_enabled));
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
353 
354 static int i915_edp_psr_status(struct seq_file *m, void *data)
355 {
356 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
357 	struct intel_dp *intel_dp = NULL;
358 	struct intel_encoder *encoder;
359 
360 	if (!HAS_PSR(dev_priv))
361 		return -ENODEV;
362 
363 	/* Find the first EDP which supports PSR */
364 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
365 		intel_dp = enc_to_intel_dp(encoder);
366 		break;
367 	}
368 
369 	if (!intel_dp)
370 		return -ENODEV;
371 
372 	return intel_psr_status(m, intel_dp);
373 }
374 
/*
 * debugfs write handler: set the PSR debug mask on every PSR-capable
 * encoder. A runtime PM wakeref is held around each update; the return
 * value reflects the last encoder processed (-ENODEV if none exist).
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;
	intel_wakeref_t wakeref;
	int ret = -ENODEV;

	if (!HAS_PSR(dev_priv))
		return ret;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);

		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

		// TODO: split to each transcoder's PSR debug state
		ret = intel_psr_debug_set(intel_dp, val);

		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	}

	return ret;
}
401 
402 static int
403 i915_edp_psr_debug_get(void *data, u64 *val)
404 {
405 	struct drm_i915_private *dev_priv = data;
406 	struct intel_encoder *encoder;
407 
408 	if (!HAS_PSR(dev_priv))
409 		return -ENODEV;
410 
411 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
412 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
413 
414 		// TODO: split to each transcoder's PSR debug state
415 		*val = READ_ONCE(intel_dp->psr.debug);
416 		return 0;
417 	}
418 
419 	return -ENODEV;
420 }
421 
422 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
423 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
424 			"%llu\n");
425 
426 static int i915_power_domain_info(struct seq_file *m, void *unused)
427 {
428 	struct drm_i915_private *i915 = node_to_i915(m->private);
429 
430 	intel_display_power_debug(i915, m);
431 
432 	return 0;
433 }
434 
/*
 * Dump DMC (Display Microcontroller) firmware state: load status, path,
 * per-pipe firmware availability, version and DC-state entry counters.
 * The counter registers vary per platform; dc6_reg stays zero-initialized
 * on platforms without a DC6 counter and is then skipped.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_dmc *dmc;
	i915_reg_t dc5_reg, dc6_reg = {};

	if (!HAS_DMC(dev_priv))
		return -ENODEV;

	dmc = &dev_priv->dmc;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
	seq_printf(m, "path: %s\n", dmc->fw_path);
	seq_printf(m, "Pipe A fw support: %s\n",
		   yesno(GRAPHICS_VER(dev_priv) >= 12));
	seq_printf(m, "Pipe A fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEA].payload));
	seq_printf(m, "Pipe B fw support: %s\n", yesno(IS_ALDERLAKE_P(dev_priv)));
	seq_printf(m, "Pipe B fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEB].payload));

	/* Without a loaded payload there are no version/counter details. */
	if (!intel_dmc_has_payload(dev_priv))
		goto out;

	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
		   DMC_VERSION_MINOR(dmc->version));

	if (DISPLAY_VER(dev_priv) >= 12) {
		if (IS_DGFX(dev_priv)) {
			/* Discrete GPUs expose only a DC5 counter. */
			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
		} else {
			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		}

		/*
		 * NOTE: DMC_DEBUG3 is a general purpose reg.
		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
		 * reg for DC3CO debugging and validation,
		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
		 */
		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(dev_priv, DMC_DEBUG3));
	} else {
		dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
						 SKL_DMC_DC3_DC5_COUNT;
		if (!IS_GEMINILAKE(dev_priv) && !IS_BROXTON(dev_priv))
			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   intel_de_read(dev_priv, dc5_reg));
	/* dc6_reg.reg == 0 means this platform has no DC6 counter. */
	if (dc6_reg.reg)
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(dev_priv, dc6_reg));

out:
	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(dev_priv, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(dev_priv, DMC_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
503 
504 static void intel_seq_print_mode(struct seq_file *m, int tabs,
505 				 const struct drm_display_mode *mode)
506 {
507 	int i;
508 
509 	for (i = 0; i < tabs; i++)
510 		seq_putc(m, '\t');
511 
512 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
513 }
514 
515 static void intel_encoder_info(struct seq_file *m,
516 			       struct intel_crtc *crtc,
517 			       struct intel_encoder *encoder)
518 {
519 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
520 	struct drm_connector_list_iter conn_iter;
521 	struct drm_connector *connector;
522 
523 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
524 		   encoder->base.base.id, encoder->base.name);
525 
526 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
527 	drm_for_each_connector_iter(connector, &conn_iter) {
528 		const struct drm_connector_state *conn_state =
529 			connector->state;
530 
531 		if (conn_state->best_encoder != &encoder->base)
532 			continue;
533 
534 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
535 			   connector->base.id, connector->name);
536 	}
537 	drm_connector_list_iter_end(&conn_iter);
538 }
539 
540 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
541 {
542 	const struct drm_display_mode *mode = panel->fixed_mode;
543 
544 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
545 }
546 
547 static void intel_hdcp_info(struct seq_file *m,
548 			    struct intel_connector *intel_connector)
549 {
550 	bool hdcp_cap, hdcp2_cap;
551 
552 	if (!intel_connector->hdcp.shim) {
553 		seq_puts(m, "No Connector Support");
554 		goto out;
555 	}
556 
557 	hdcp_cap = intel_hdcp_capable(intel_connector);
558 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
559 
560 	if (hdcp_cap)
561 		seq_puts(m, "HDCP1.4 ");
562 	if (hdcp2_cap)
563 		seq_puts(m, "HDCP2.2 ");
564 
565 	if (!hdcp_cap && !hdcp2_cap)
566 		seq_puts(m, "None");
567 
568 out:
569 	seq_puts(m, "\n");
570 }
571 
572 static void intel_dp_info(struct seq_file *m,
573 			  struct intel_connector *intel_connector)
574 {
575 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
576 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
577 	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
578 
579 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
580 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
581 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
582 		intel_panel_info(m, &intel_connector->panel);
583 
584 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
585 				edid ? edid->data : NULL, &intel_dp->aux);
586 }
587 
588 static void intel_dp_mst_info(struct seq_file *m,
589 			      struct intel_connector *intel_connector)
590 {
591 	bool has_audio = intel_connector->port->has_audio;
592 
593 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
594 }
595 
596 static void intel_hdmi_info(struct seq_file *m,
597 			    struct intel_connector *intel_connector)
598 {
599 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
600 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
601 
602 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
603 }
604 
/* LVDS connectors: just print the fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
610 
611 static void intel_connector_info(struct seq_file *m,
612 				 struct drm_connector *connector)
613 {
614 	struct intel_connector *intel_connector = to_intel_connector(connector);
615 	const struct drm_connector_state *conn_state = connector->state;
616 	struct intel_encoder *encoder =
617 		to_intel_encoder(conn_state->best_encoder);
618 	const struct drm_display_mode *mode;
619 
620 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
621 		   connector->base.id, connector->name,
622 		   drm_get_connector_status_name(connector->status));
623 
624 	if (connector->status == connector_status_disconnected)
625 		return;
626 
627 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
628 		   connector->display_info.width_mm,
629 		   connector->display_info.height_mm);
630 	seq_printf(m, "\tsubpixel order: %s\n",
631 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
632 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
633 
634 	if (!encoder)
635 		return;
636 
637 	switch (connector->connector_type) {
638 	case DRM_MODE_CONNECTOR_DisplayPort:
639 	case DRM_MODE_CONNECTOR_eDP:
640 		if (encoder->type == INTEL_OUTPUT_DP_MST)
641 			intel_dp_mst_info(m, intel_connector);
642 		else
643 			intel_dp_info(m, intel_connector);
644 		break;
645 	case DRM_MODE_CONNECTOR_LVDS:
646 		if (encoder->type == INTEL_OUTPUT_LVDS)
647 			intel_lvds_info(m, intel_connector);
648 		break;
649 	case DRM_MODE_CONNECTOR_HDMIA:
650 		if (encoder->type == INTEL_OUTPUT_HDMI ||
651 		    encoder->type == INTEL_OUTPUT_DDI)
652 			intel_hdmi_info(m, intel_connector);
653 		break;
654 	default:
655 		break;
656 	}
657 
658 	seq_puts(m, "\tHDCP version: ");
659 	intel_hdcp_info(m, intel_connector);
660 
661 	seq_printf(m, "\tmodes:\n");
662 	list_for_each_entry(mode, &connector->modes, head)
663 		intel_seq_print_mode(m, 2, mode);
664 }
665 
/* Short three-letter name for a DRM plane type, for compact output. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
683 
/*
 * Format a plane rotation bitmask into @buf as human-readable flags
 * followed by the raw hex value.
 */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
700 
701 static const char *plane_visibility(const struct intel_plane_state *plane_state)
702 {
703 	if (plane_state->uapi.visible)
704 		return "visible";
705 
706 	if (plane_state->planar_slave)
707 		return "planar-slave";
708 
709 	return "hidden";
710 }
711 
/*
 * Print the uapi (userspace-requested) state of a plane: framebuffer,
 * visibility, src/dst rectangles, rotation and planar linkage.
 */
static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
{
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	const struct drm_framebuffer *fb = plane_state->uapi.fb;
	struct drm_rect src, dst;
	char rot_str[48];

	src = drm_plane_state_src(&plane_state->uapi);
	dst = drm_plane_state_dest(&plane_state->uapi);

	plane_rotation(rot_str, sizeof(rot_str),
		       plane_state->uapi.rotation);

	/* A plane may have no framebuffer attached (disabled plane). */
	seq_puts(m, "\t\tuapi: [FB:");
	if (fb)
		seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
			   &fb->format->format, fb->modifier, fb->width,
			   fb->height);
	else
		seq_puts(m, "0] n/a,0x0,0x0,");
	seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
		   ", rotation=%s\n", plane_visibility(plane_state),
		   DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);

	/* Planar (e.g. NV12 UV/Y) planes are linked master/slave pairs. */
	if (plane_state->planar_linked_plane)
		seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
			   plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
			   plane_state->planar_slave ? "slave" : "master");
}
742 
/*
 * Print the hw (actually-programmed) state of a plane. Skipped entirely
 * when no framebuffer is bound in the hw state.
 */
static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
{
	const struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	const struct drm_framebuffer *fb = plane_state->hw.fb;
	char rot_str[48];

	if (!fb)
		return;

	plane_rotation(rot_str, sizeof(rot_str),
		       plane_state->hw.rotation);

	seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
		   DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
		   fb->base.id, &fb->format->format,
		   fb->modifier, fb->width, fb->height,
		   yesno(plane_state->uapi.visible),
		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
		   DRM_RECT_ARG(&plane_state->uapi.dst),
		   rot_str);
}
765 
766 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
767 {
768 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
769 	struct intel_plane *plane;
770 
771 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
772 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
773 			   plane->base.base.id, plane->base.name,
774 			   plane_type(plane->base.type));
775 		intel_plane_uapi_info(m, plane);
776 		intel_plane_hw_info(m, plane);
777 	}
778 }
779 
780 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
781 {
782 	const struct intel_crtc_state *crtc_state =
783 		to_intel_crtc_state(crtc->base.state);
784 	int num_scalers = crtc->num_scalers;
785 	int i;
786 
787 	/* Not all platformas have a scaler */
788 	if (num_scalers) {
789 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
790 			   num_scalers,
791 			   crtc_state->scaler_state.scaler_users,
792 			   crtc_state->scaler_state.scaler_id);
793 
794 		for (i = 0; i < num_scalers; i++) {
795 			const struct intel_scaler *sc =
796 				&crtc_state->scaler_state.scalers[i];
797 
798 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
799 				   i, yesno(sc->in_use), sc->mode);
800 		}
801 		seq_puts(m, "\n");
802 	} else {
803 		seq_puts(m, "\tNo scalers available on this platform\n");
804 	}
805 }
806 
807 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
/*
 * Print vblank-evade update statistics for a CRTC: total update count,
 * an ASCII histogram of update durations, and min/max/average/overrun
 * figures. @hdr is prepended to every output line for indentation.
 */
static void crtc_updates_info(struct seq_file *m,
			      struct intel_crtc *crtc,
			      const char *hdr)
{
	u64 count;
	int row;

	count = 0;
	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
		count += crtc->debug.vbl.times[row];
	seq_printf(m, "%sUpdates: %llu\n", hdr, count);
	if (!count)
		return;

	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
		char columns[80] = "       |";
		unsigned int x;

		/*
		 * Label every other row with its bucket upper bound,
		 * 2^(row+9) ns scaled to us (rows <= 10) or ms (rows > 10).
		 */
		if (row & 1) {
			const char *units;

			if (row > 10) {
				x = 1000000;
				units = "ms";
			} else {
				x = 1000;
				units = "us";
			}

			snprintf(columns, sizeof(columns), "%4ld%s |",
				 DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
		}

		/* Bar length is log2 of the bucket's hit count. */
		if (crtc->debug.vbl.times[row]) {
			x = ilog2(crtc->debug.vbl.times[row]);
			memset(columns + 8, '*', x);
			columns[8 + x] = '\0';
		}

		seq_printf(m, "%s%s\n", hdr, columns);
	}

	seq_printf(m, "%sMin update: %lluns\n",
		   hdr, crtc->debug.vbl.min);
	seq_printf(m, "%sMax update: %lluns\n",
		   hdr, crtc->debug.vbl.max);
	seq_printf(m, "%sAverage update: %lluns\n",
		   hdr, div64_u64(crtc->debug.vbl.sum,  count));
	seq_printf(m, "%sOverruns > %uus: %u\n",
		   hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
}
859 
/* seq_file show callback: dump update stats for the CRTC in m->private. */
static int crtc_updates_show(struct seq_file *m, void *data)
{
	crtc_updates_info(m, m->private, "");
	return 0;
}
865 
/* debugfs open: bind crtc_updates_show to the per-CRTC private data. */
static int crtc_updates_open(struct inode *inode, struct file *file)
{
	return single_open(file, crtc_updates_show, inode->i_private);
}
870 
871 static ssize_t crtc_updates_write(struct file *file,
872 				  const char __user *ubuf,
873 				  size_t len, loff_t *offp)
874 {
875 	struct seq_file *m = file->private_data;
876 	struct intel_crtc *crtc = m->private;
877 
878 	/* May race with an update. Meh. */
879 	memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
880 
881 	return len;
882 }
883 
/* File ops for the per-CRTC i915_update_info debugfs entry. */
static const struct file_operations crtc_updates_fops = {
	.owner = THIS_MODULE,
	.open = crtc_updates_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = crtc_updates_write
};
892 
/* Register the i915_update_info file under the CRTC's debugfs directory. */
static void crtc_updates_add(struct drm_crtc *crtc)
{
	debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
			    to_intel_crtc(crtc), &crtc_updates_fops);
}
898 
899 #else
/* No-op stub when CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is disabled. */
static void crtc_updates_info(struct seq_file *m,
			      struct intel_crtc *crtc,
			      const char *hdr)
{
}
905 
/* No-op stub when CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is disabled. */
static void crtc_updates_add(struct drm_crtc *crtc)
{
}
909 #endif
910 
/*
 * Print the full state of one CRTC: uapi and hw state, scalers,
 * bigjoiner linkage, attached encoders, planes, FIFO underrun reporting
 * and (when compiled in) vblank-evade update statistics.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_encoder *encoder;

	seq_printf(m, "[CRTC:%d:%s]:\n",
		   crtc->base.base.id, crtc->base.name);

	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
		   yesno(crtc_state->uapi.enable),
		   yesno(crtc_state->uapi.active),
		   DRM_MODE_ARG(&crtc_state->uapi.mode));

	/* hw details are only meaningful when the CRTC is enabled. */
	if (crtc_state->hw.enable) {
		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
			   yesno(crtc_state->hw.active),
			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));

		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
			   yesno(crtc_state->dither), crtc_state->pipe_bpp);

		intel_scaler_info(m, crtc);
	}

	/* Bigjoiner pairs two pipes to drive one very wide mode. */
	if (crtc_state->bigjoiner)
		seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
			   crtc_state->bigjoiner_linked_crtc->base.base.id,
			   crtc_state->bigjoiner_linked_crtc->base.name,
			   crtc_state->bigjoiner_slave ? "slave" : "master");

	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
				    crtc_state->uapi.encoder_mask)
		intel_encoder_info(m, crtc, encoder);

	intel_plane_info(m, crtc);

	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
		   yesno(!crtc->cpu_fifo_underrun_disabled),
		   yesno(!crtc->pch_fifo_underrun_disabled));

	crtc_updates_info(m, crtc, "\t");
}
956 
957 static int i915_display_info(struct seq_file *m, void *unused)
958 {
959 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
960 	struct drm_device *dev = &dev_priv->drm;
961 	struct intel_crtc *crtc;
962 	struct drm_connector *connector;
963 	struct drm_connector_list_iter conn_iter;
964 	intel_wakeref_t wakeref;
965 
966 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
967 
968 	drm_modeset_lock_all(dev);
969 
970 	seq_printf(m, "CRTC info\n");
971 	seq_printf(m, "---------\n");
972 	for_each_intel_crtc(dev, crtc)
973 		intel_crtc_info(m, crtc);
974 
975 	seq_printf(m, "\n");
976 	seq_printf(m, "Connector info\n");
977 	seq_printf(m, "--------------\n");
978 	drm_connector_list_iter_begin(dev, &conn_iter);
979 	drm_for_each_connector_iter(connector, &conn_iter)
980 		intel_connector_info(m, connector);
981 	drm_connector_list_iter_end(&conn_iter);
982 
983 	drm_modeset_unlock_all(dev);
984 
985 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
986 
987 	return 0;
988 }
989 
990 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
991 {
992 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
993 	struct drm_device *dev = &dev_priv->drm;
994 	int i;
995 
996 	drm_modeset_lock_all(dev);
997 
998 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
999 		   dev_priv->dpll.ref_clks.nssc,
1000 		   dev_priv->dpll.ref_clks.ssc);
1001 
1002 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
1003 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
1004 
1005 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
1006 			   pll->info->id);
1007 		seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
1008 			   pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
1009 		seq_printf(m, " tracked hardware state:\n");
1010 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
1011 		seq_printf(m, " dpll_md: 0x%08x\n",
1012 			   pll->state.hw_state.dpll_md);
1013 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
1014 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
1015 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
1016 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
1017 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
1018 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
1019 			   pll->state.hw_state.mg_refclkin_ctl);
1020 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
1021 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
1022 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
1023 			   pll->state.hw_state.mg_clktop2_hsclkctl);
1024 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
1025 			   pll->state.hw_state.mg_pll_div0);
1026 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
1027 			   pll->state.hw_state.mg_pll_div1);
1028 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
1029 			   pll->state.hw_state.mg_pll_lf);
1030 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
1031 			   pll->state.hw_state.mg_pll_frac_lock);
1032 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
1033 			   pll->state.hw_state.mg_pll_ssc);
1034 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
1035 			   pll->state.hw_state.mg_pll_bias);
1036 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
1037 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
1038 	}
1039 	drm_modeset_unlock_all(dev);
1040 
1041 	return 0;
1042 }
1043 
1044 static int i915_ipc_status_show(struct seq_file *m, void *data)
1045 {
1046 	struct drm_i915_private *dev_priv = m->private;
1047 
1048 	seq_printf(m, "Isochronous Priority Control: %s\n",
1049 			yesno(dev_priv->ipc_enabled));
1050 	return 0;
1051 }
1052 
1053 static int i915_ipc_status_open(struct inode *inode, struct file *file)
1054 {
1055 	struct drm_i915_private *dev_priv = inode->i_private;
1056 
1057 	if (!HAS_IPC(dev_priv))
1058 		return -ENODEV;
1059 
1060 	return single_open(file, i915_ipc_status_show, dev_priv);
1061 }
1062 
/*
 * Parse a boolean from userspace and enable/disable Isochronous Priority
 * Control accordingly. Watermarks are only recomputed on the next commit,
 * hence the informational message when turning IPC on.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
				     size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	intel_wakeref_t wakeref;
	bool enable;
	int ret;

	ret = kstrtobool_from_user(ubuf, len, &enable);
	if (ret < 0)
		return ret;

	/* Hold a runtime PM wakeref while touching hardware state. */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (!dev_priv->ipc_enabled && enable)
			drm_info(&dev_priv->drm,
				 "Enabling IPC: WM will be proper only after next commit\n");
		/* Update the software flag before programming the hardware. */
		dev_priv->ipc_enabled = enable;
		intel_enable_ipc(dev_priv);
	}

	return len;
}
1086 
/* debugfs file ops for i915_ipc_status: seq_file read plus a bool write. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
1095 
1096 static int i915_ddb_info(struct seq_file *m, void *unused)
1097 {
1098 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1099 	struct drm_device *dev = &dev_priv->drm;
1100 	struct skl_ddb_entry *entry;
1101 	struct intel_crtc *crtc;
1102 
1103 	if (DISPLAY_VER(dev_priv) < 9)
1104 		return -ENODEV;
1105 
1106 	drm_modeset_lock_all(dev);
1107 
1108 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1109 
1110 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1111 		struct intel_crtc_state *crtc_state =
1112 			to_intel_crtc_state(crtc->base.state);
1113 		enum pipe pipe = crtc->pipe;
1114 		enum plane_id plane_id;
1115 
1116 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1117 
1118 		for_each_plane_id_on_crtc(crtc, plane_id) {
1119 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1120 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1121 				   entry->start, entry->end,
1122 				   skl_ddb_entry_size(entry));
1123 		}
1124 
1125 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1126 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1127 			   entry->end, skl_ddb_entry_size(entry));
1128 	}
1129 
1130 	drm_modeset_unlock_all(dev);
1131 
1132 	return 0;
1133 }
1134 
/*
 * Print the DRRS (Display Refresh Rate Switching) state for one active
 * crtc: whether each connector on it supports DRRS, whether DRRS is in
 * use, and which refresh rate (high/low) is currently selected.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		bool supported = false;

		/* Only report connectors currently driven by this crtc. */
		if (connector->state->crtc != &crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);

		/* Seamless DRRS is only reported for the eDP connector. */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
		    drrs->type == SEAMLESS_DRRS_SUPPORT)
			supported = true;

		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
	}
	drm_connector_list_iter_end(&conn_iter);

	seq_puts(m, "\n");

	if (to_intel_crtc_state(crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->mutex guards drrs->dp and the refresh-rate state. */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Enabled: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Enabled : No");
	}
	seq_puts(m, "\n");
}
1205 
1206 static int i915_drrs_status(struct seq_file *m, void *unused)
1207 {
1208 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1209 	struct drm_device *dev = &dev_priv->drm;
1210 	struct intel_crtc *crtc;
1211 	int active_crtc_cnt = 0;
1212 
1213 	drm_modeset_lock_all(dev);
1214 	for_each_intel_crtc(dev, crtc) {
1215 		if (crtc->base.state->active) {
1216 			active_crtc_cnt++;
1217 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1218 
1219 			drrs_status_per_crtc(m, dev, crtc);
1220 		}
1221 	}
1222 	drm_modeset_unlock_all(dev);
1223 
1224 	if (!active_crtc_cnt)
1225 		seq_puts(m, "No active crtc found\n");
1226 
1227 	return 0;
1228 }
1229 
1230 static bool
1231 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1232 			      enum i915_power_well_id power_well_id)
1233 {
1234 	intel_wakeref_t wakeref;
1235 	bool is_enabled;
1236 
1237 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1238 	is_enabled = intel_display_power_well_is_enabled(i915,
1239 							 power_well_id);
1240 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1241 
1242 	return is_enabled;
1243 }
1244 
1245 static int i915_lpsp_status(struct seq_file *m, void *unused)
1246 {
1247 	struct drm_i915_private *i915 = node_to_i915(m->private);
1248 	bool lpsp_enabled = false;
1249 
1250 	if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) {
1251 		lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2);
1252 	} else if (IS_DISPLAY_VER(i915, 11, 12)) {
1253 		lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3);
1254 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
1255 		lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL);
1256 	} else {
1257 		seq_puts(m, "LPSP: not supported\n");
1258 		return 0;
1259 	}
1260 
1261 	seq_printf(m, "LPSP: %s\n", enableddisabled(lpsp_enabled));
1262 
1263 	return 0;
1264 }
1265 
/*
 * Dump the DP MST topology of every MST-capable DP source port. MST
 * branch/leaf connectors themselves are skipped; only the source side
 * encoder is walked, and the helper prints the downstream topology.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip MST leaf connectors; we want the source port. */
		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		dig_port = enc_to_dig_port(intel_encoder);
		if (!intel_dp_mst_source_support(&dig_port->dp))
			continue;

		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
			   dig_port->base.base.base.id,
			   dig_port->base.base.name);
		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
1297 
/*
 * Arm/disarm DP compliance testing: userspace writes "1" to mark the
 * connected DP sink(s) as having an active compliance test. Any other
 * value clears the flag.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy and NUL-terminate the user buffer for kstrtoint(). */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	drm_dbg(&to_i915(dev)->drm,
		"Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST leaf connectors don't carry compliance state. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			drm_dbg(&to_i915(dev)->drm,
				"Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = true;
			else
				intel_dp->compliance.test_active = false;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
1358 
1359 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1360 {
1361 	struct drm_i915_private *dev_priv = m->private;
1362 	struct drm_device *dev = &dev_priv->drm;
1363 	struct drm_connector *connector;
1364 	struct drm_connector_list_iter conn_iter;
1365 	struct intel_dp *intel_dp;
1366 
1367 	drm_connector_list_iter_begin(dev, &conn_iter);
1368 	drm_for_each_connector_iter(connector, &conn_iter) {
1369 		struct intel_encoder *encoder;
1370 
1371 		if (connector->connector_type !=
1372 		    DRM_MODE_CONNECTOR_DisplayPort)
1373 			continue;
1374 
1375 		encoder = to_intel_encoder(connector->encoder);
1376 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1377 			continue;
1378 
1379 		if (encoder && connector->status == connector_status_connected) {
1380 			intel_dp = enc_to_intel_dp(encoder);
1381 			if (intel_dp->compliance.test_active)
1382 				seq_puts(m, "1");
1383 			else
1384 				seq_puts(m, "0");
1385 		} else
1386 			seq_puts(m, "0");
1387 	}
1388 	drm_connector_list_iter_end(&conn_iter);
1389 
1390 	return 0;
1391 }
1392 
/* Open i915_dp_test_active as a single_open seq_file. */
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}
1399 
/* debugfs file ops for i915_dp_test_active: seq_file read + custom write. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
1408 
/*
 * Dump the DP compliance test data for each connected DP connector. The
 * format depends on the requested test: EDID read, video pattern, or PHY
 * test pattern. Disconnected connectors print "0".
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST leaf connectors don't carry compliance state. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(encoder);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			} else if (intel_dp->compliance.test_type ==
				   DP_TEST_LINK_PHY_TEST_PATTERN) {
				seq_printf(m, "pattern: %d\n",
					   intel_dp->compliance.test_data.phytest.phy_pattern);
				seq_printf(m, "Number of lanes: %d\n",
					   intel_dp->compliance.test_data.phytest.num_lanes);
				seq_printf(m, "Link Rate: %d\n",
					   intel_dp->compliance.test_data.phytest.link_rate);
				seq_printf(m, "level: %02x\n",
					   intel_dp->train_set[0]);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1462 
1463 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1464 {
1465 	struct drm_i915_private *dev_priv = m->private;
1466 	struct drm_device *dev = &dev_priv->drm;
1467 	struct drm_connector *connector;
1468 	struct drm_connector_list_iter conn_iter;
1469 	struct intel_dp *intel_dp;
1470 
1471 	drm_connector_list_iter_begin(dev, &conn_iter);
1472 	drm_for_each_connector_iter(connector, &conn_iter) {
1473 		struct intel_encoder *encoder;
1474 
1475 		if (connector->connector_type !=
1476 		    DRM_MODE_CONNECTOR_DisplayPort)
1477 			continue;
1478 
1479 		encoder = to_intel_encoder(connector->encoder);
1480 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1481 			continue;
1482 
1483 		if (encoder && connector->status == connector_status_connected) {
1484 			intel_dp = enc_to_intel_dp(encoder);
1485 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1486 		} else
1487 			seq_puts(m, "0");
1488 	}
1489 	drm_connector_list_iter_end(&conn_iter);
1490 
1491 	return 0;
1492 }
1493 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1494 
/*
 * Dump one set of watermark latency values. Raw entries are converted to
 * tenths of a microsecond for display: gen9+/vlv/chv/g4x store whole us
 * (x10), while older platforms store WM1+ in 0.5us units (x5).
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	/* Number of watermark levels varies per platform. */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (DISPLAY_VER(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
1534 
1535 static int pri_wm_latency_show(struct seq_file *m, void *data)
1536 {
1537 	struct drm_i915_private *dev_priv = m->private;
1538 	const u16 *latencies;
1539 
1540 	if (DISPLAY_VER(dev_priv) >= 9)
1541 		latencies = dev_priv->wm.skl_latency;
1542 	else
1543 		latencies = dev_priv->wm.pri_latency;
1544 
1545 	wm_latency_show(m, latencies);
1546 
1547 	return 0;
1548 }
1549 
1550 static int spr_wm_latency_show(struct seq_file *m, void *data)
1551 {
1552 	struct drm_i915_private *dev_priv = m->private;
1553 	const u16 *latencies;
1554 
1555 	if (DISPLAY_VER(dev_priv) >= 9)
1556 		latencies = dev_priv->wm.skl_latency;
1557 	else
1558 		latencies = dev_priv->wm.spr_latency;
1559 
1560 	wm_latency_show(m, latencies);
1561 
1562 	return 0;
1563 }
1564 
1565 static int cur_wm_latency_show(struct seq_file *m, void *data)
1566 {
1567 	struct drm_i915_private *dev_priv = m->private;
1568 	const u16 *latencies;
1569 
1570 	if (DISPLAY_VER(dev_priv) >= 9)
1571 		latencies = dev_priv->wm.skl_latency;
1572 	else
1573 		latencies = dev_priv->wm.cur_latency;
1574 
1575 	wm_latency_show(m, latencies);
1576 
1577 	return 0;
1578 }
1579 
1580 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1581 {
1582 	struct drm_i915_private *dev_priv = inode->i_private;
1583 
1584 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
1585 		return -ENODEV;
1586 
1587 	return single_open(file, pri_wm_latency_show, dev_priv);
1588 }
1589 
1590 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1591 {
1592 	struct drm_i915_private *dev_priv = inode->i_private;
1593 
1594 	if (HAS_GMCH(dev_priv))
1595 		return -ENODEV;
1596 
1597 	return single_open(file, spr_wm_latency_show, dev_priv);
1598 }
1599 
1600 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1601 {
1602 	struct drm_i915_private *dev_priv = inode->i_private;
1603 
1604 	if (HAS_GMCH(dev_priv))
1605 		return -ENODEV;
1606 
1607 	return single_open(file, cur_wm_latency_show, dev_priv);
1608 }
1609 
/*
 * Common write path for the WM latency debugfs files: parse exactly
 * num_levels space-separated u16 values from userspace and store them
 * into the given latency table under the modeset locks.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Must match the per-platform level count used by wm_latency_show(). */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Leave room for the NUL terminator. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Require exactly num_levels values; reject short or long input. */
	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
1654 
1655 
1656 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1657 				    size_t len, loff_t *offp)
1658 {
1659 	struct seq_file *m = file->private_data;
1660 	struct drm_i915_private *dev_priv = m->private;
1661 	u16 *latencies;
1662 
1663 	if (DISPLAY_VER(dev_priv) >= 9)
1664 		latencies = dev_priv->wm.skl_latency;
1665 	else
1666 		latencies = dev_priv->wm.pri_latency;
1667 
1668 	return wm_latency_write(file, ubuf, len, offp, latencies);
1669 }
1670 
1671 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1672 				    size_t len, loff_t *offp)
1673 {
1674 	struct seq_file *m = file->private_data;
1675 	struct drm_i915_private *dev_priv = m->private;
1676 	u16 *latencies;
1677 
1678 	if (DISPLAY_VER(dev_priv) >= 9)
1679 		latencies = dev_priv->wm.skl_latency;
1680 	else
1681 		latencies = dev_priv->wm.spr_latency;
1682 
1683 	return wm_latency_write(file, ubuf, len, offp, latencies);
1684 }
1685 
1686 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1687 				    size_t len, loff_t *offp)
1688 {
1689 	struct seq_file *m = file->private_data;
1690 	struct drm_i915_private *dev_priv = m->private;
1691 	u16 *latencies;
1692 
1693 	if (DISPLAY_VER(dev_priv) >= 9)
1694 		latencies = dev_priv->wm.skl_latency;
1695 	else
1696 		latencies = dev_priv->wm.cur_latency;
1697 
1698 	return wm_latency_write(file, ubuf, len, offp, latencies);
1699 }
1700 
/* debugfs file ops for the three WM latency files (read + write). */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
1727 
/*
 * Report the HPD storm threshold and whether a storm is currently being
 * handled (detected == the reenable work is still pending).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
1746 
/*
 * Set the HPD storm detection threshold. Accepts an unsigned integer
 * (0 disables detection) or the literal string "reset" to restore the
 * default. Also clears the per-pin stats and re-enables HPD if a storm
 * was in progress.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	/* Leave room for the NUL terminator. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");

	/* irq_lock guards the threshold and the per-pin stats. */
	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
1796 
/* Open i915_hpd_storm_ctl as a single_open seq_file. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
1801 
/* debugfs file ops for i915_hpd_storm_ctl: seq_file read + custom write. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
1810 
1811 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1812 {
1813 	struct drm_i915_private *dev_priv = m->private;
1814 
1815 	seq_printf(m, "Enabled: %s\n",
1816 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1817 
1818 	return 0;
1819 }
1820 
/* Open i915_hpd_short_storm_ctl as a single_open seq_file. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
1827 
/*
 * Enable/disable HPD short-pulse storm detection. Accepts a boolean or
 * the literal string "reset" (which restores the platform default:
 * enabled only when the platform lacks DP MST). Also clears the per-pin
 * stats and re-enables HPD if a storm was in progress.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	/* Leave room for the NUL terminator. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
		    new_state ? "En" : "Dis");

	/* irq_lock guards the enable flag and the per-pin stats. */
	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
1874 
/* debugfs file ops for i915_hpd_short_storm_ctl: read + custom write. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
1883 
/*
 * Manually enable (val != 0) or disable (val == 0) DRRS on every active
 * crtc that supports it, for each attached eDP connector. Each crtc is
 * locked individually, and any pending commit on it is waited for (up to
 * hw_done) before DRRS is toggled.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (DISPLAY_VER(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* ret is 0 here, so "goto out" below just skips this crtc. */
		if (!crtc_state->hw.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors actually driven by this crtc. */
			if (!(crtc_state->uapi.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(to_intel_connector(connector));
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			drm_dbg(&dev_priv->drm,
				"Manually %sabling DRRS. %llu\n",
				val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(encoder);
			if (val)
				intel_drrs_enable(intel_dp, crtc_state);
			else
				intel_drrs_disable(intel_dp, crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
1952 
/*
 * Re-arm FIFO underrun reporting on all active pipes (and reset the FBC
 * underrun state) when userspace writes a true value. Underrun interrupts
 * are normally disabled after the first hit to avoid an interrupt storm.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		/* Wait for any in-flight commit to fully complete first. */
		crtc_state = to_intel_crtc_state(crtc->base.state);
		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->hw.active) {
			drm_dbg_kms(&dev_priv->drm,
				    "Re-arming FIFO underruns on pipe %c\n",
				    pipe_name(crtc->pipe));

			intel_crtc_arm_fifo_underrun(crtc, crtc_state);
		}

		drm_modeset_unlock(&crtc->base.mutex);

		if (ret)
			return ret;
	}

	intel_fbc_reset_underrun(dev_priv);

	return cnt;
}
2005 
/* debugfs file ops for i915_fifo_underrun_reset: write-only control. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
2012 
2013 static const struct drm_info_list intel_display_debugfs_list[] = {
2014 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
2015 	{"i915_ips_status", i915_ips_status, 0},
2016 	{"i915_sr_status", i915_sr_status, 0},
2017 	{"i915_opregion", i915_opregion, 0},
2018 	{"i915_vbt", i915_vbt, 0},
2019 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
2020 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
2021 	{"i915_power_domain_info", i915_power_domain_info, 0},
2022 	{"i915_dmc_info", i915_dmc_info, 0},
2023 	{"i915_display_info", i915_display_info, 0},
2024 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
2025 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
2026 	{"i915_ddb_info", i915_ddb_info, 0},
2027 	{"i915_drrs_status", i915_drrs_status, 0},
2028 	{"i915_lpsp_status", i915_lpsp_status, 0},
2029 };
2030 
/*
 * Writable debugfs files with custom file_operations, registered one by
 * one with debugfs_create_file() from intel_display_debugfs_register().
 */
static const struct {
	const char *name;
	const struct file_operations *fops;
} intel_display_debugfs_files[] = {
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
2048 
2049 void intel_display_debugfs_register(struct drm_i915_private *i915)
2050 {
2051 	struct drm_minor *minor = i915->drm.primary;
2052 	int i;
2053 
2054 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
2055 		debugfs_create_file(intel_display_debugfs_files[i].name,
2056 				    S_IRUGO | S_IWUSR,
2057 				    minor->debugfs_root,
2058 				    to_i915(minor->dev),
2059 				    intel_display_debugfs_files[i].fops);
2060 	}
2061 
2062 	drm_debugfs_create_files(intel_display_debugfs_list,
2063 				 ARRAY_SIZE(intel_display_debugfs_list),
2064 				 minor->debugfs_root, minor);
2065 
2066 	intel_fbc_debugfs_register(i915);
2067 }
2068 
2069 static int i915_panel_show(struct seq_file *m, void *data)
2070 {
2071 	struct drm_connector *connector = m->private;
2072 	struct intel_dp *intel_dp =
2073 		intel_attached_dp(to_intel_connector(connector));
2074 
2075 	if (connector->status != connector_status_connected)
2076 		return -ENODEV;
2077 
2078 	seq_printf(m, "Panel power up delay: %d\n",
2079 		   intel_dp->pps.panel_power_up_delay);
2080 	seq_printf(m, "Panel power down delay: %d\n",
2081 		   intel_dp->pps.panel_power_down_delay);
2082 	seq_printf(m, "Backlight on delay: %d\n",
2083 		   intel_dp->pps.backlight_on_delay);
2084 	seq_printf(m, "Backlight off delay: %d\n",
2085 		   intel_dp->pps.backlight_off_delay);
2086 
2087 	return 0;
2088 }
2089 DEFINE_SHOW_ATTRIBUTE(i915_panel);
2090 
2091 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2092 {
2093 	struct drm_connector *connector = m->private;
2094 	struct drm_i915_private *i915 = to_i915(connector->dev);
2095 	struct intel_connector *intel_connector = to_intel_connector(connector);
2096 	int ret;
2097 
2098 	ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
2099 	if (ret)
2100 		return ret;
2101 
2102 	if (!connector->encoder || connector->status != connector_status_connected) {
2103 		ret = -ENODEV;
2104 		goto out;
2105 	}
2106 
2107 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2108 		   connector->base.id);
2109 	intel_hdcp_info(m, intel_connector);
2110 
2111 out:
2112 	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
2113 
2114 	return ret;
2115 }
2116 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2117 
2118 static int i915_psr_status_show(struct seq_file *m, void *data)
2119 {
2120 	struct drm_connector *connector = m->private;
2121 	struct intel_dp *intel_dp =
2122 		intel_attached_dp(to_intel_connector(connector));
2123 
2124 	return intel_psr_status(m, intel_dp);
2125 }
2126 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
2127 
2128 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2129 {
2130 	struct drm_connector *connector = m->private;
2131 	struct drm_i915_private *i915 = to_i915(connector->dev);
2132 	struct intel_encoder *encoder;
2133 	bool lpsp_capable = false;
2134 
2135 	encoder = intel_attached_encoder(to_intel_connector(connector));
2136 	if (!encoder)
2137 		return -ENODEV;
2138 
2139 	if (connector->status != connector_status_connected)
2140 		return -ENODEV;
2141 
2142 	if (DISPLAY_VER(i915) >= 13)
2143 		lpsp_capable = encoder->port <= PORT_B;
2144 	else if (DISPLAY_VER(i915) >= 12)
2145 		/*
2146 		 * Actually TGL can drive LPSP on port till DDI_C
2147 		 * but there is no physical connected DDI_C on TGL sku's,
2148 		 * even driver is not initilizing DDI_C port for gen12.
2149 		 */
2150 		lpsp_capable = encoder->port <= PORT_B;
2151 	else if (DISPLAY_VER(i915) == 11)
2152 		lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2153 				connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2154 	else if (IS_DISPLAY_VER(i915, 9, 10))
2155 		lpsp_capable = (encoder->port == PORT_A &&
2156 				(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2157 				 connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2158 				 connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2159 	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2160 		lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP;
2161 
2162 	seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");
2163 
2164 	return 0;
2165 }
2166 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2167 
/*
 * Show DSC and FEC capability/state for the CRTC currently driving this
 * connector.
 *
 * Both connection_mutex and the CRTC mutex are taken under a modeset
 * acquire context so that a -EDEADLK from either lock can be handled by
 * backing off (dropping all held locks) and retrying the whole sequence
 * via the try_again loop.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Deadlock: drop the held locks and restart. */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* Same back-off dance for the CRTC lock. */
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = intel_attached_dp(to_intel_connector(connector));
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		/* FEC only applies to external DP links, not eDP. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
2226 
2227 static ssize_t i915_dsc_fec_support_write(struct file *file,
2228 					  const char __user *ubuf,
2229 					  size_t len, loff_t *offp)
2230 {
2231 	bool dsc_enable = false;
2232 	int ret;
2233 	struct drm_connector *connector =
2234 		((struct seq_file *)file->private_data)->private;
2235 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2236 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2237 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2238 
2239 	if (len == 0)
2240 		return 0;
2241 
2242 	drm_dbg(&i915->drm,
2243 		"Copied %zu bytes from user to force DSC\n", len);
2244 
2245 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2246 	if (ret < 0)
2247 		return ret;
2248 
2249 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2250 		(dsc_enable) ? "true" : "false");
2251 	intel_dp->force_dsc_en = dsc_enable;
2252 
2253 	*offp += len;
2254 	return len;
2255 }
2256 
/* seq_file open hook; i_private is the drm_connector for this file. */
static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}
2263 
/* Read: DSC/FEC status dump; write: force DSC enable (boolean). */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
2272 
2273 static int i915_dsc_bpp_show(struct seq_file *m, void *data)
2274 {
2275 	struct drm_connector *connector = m->private;
2276 	struct drm_device *dev = connector->dev;
2277 	struct drm_crtc *crtc;
2278 	struct intel_crtc_state *crtc_state;
2279 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2280 	int ret;
2281 
2282 	if (!encoder)
2283 		return -ENODEV;
2284 
2285 	ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
2286 	if (ret)
2287 		return ret;
2288 
2289 	crtc = connector->state->crtc;
2290 	if (connector->status != connector_status_connected || !crtc) {
2291 		ret = -ENODEV;
2292 		goto out;
2293 	}
2294 
2295 	crtc_state = to_intel_crtc_state(crtc->state);
2296 	seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp);
2297 
2298 out:	drm_modeset_unlock(&dev->mode_config.connection_mutex);
2299 
2300 	return ret;
2301 }
2302 
2303 static ssize_t i915_dsc_bpp_write(struct file *file,
2304 				  const char __user *ubuf,
2305 				  size_t len, loff_t *offp)
2306 {
2307 	struct drm_connector *connector =
2308 		((struct seq_file *)file->private_data)->private;
2309 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2310 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2311 	int dsc_bpp = 0;
2312 	int ret;
2313 
2314 	ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp);
2315 	if (ret < 0)
2316 		return ret;
2317 
2318 	intel_dp->force_dsc_bpp = dsc_bpp;
2319 	*offp += len;
2320 
2321 	return len;
2322 }
2323 
/* seq_file open hook; i_private is the drm_connector for this file. */
static int i915_dsc_bpp_open(struct inode *inode,
			     struct file *file)
{
	return single_open(file, i915_dsc_bpp_show,
			   inode->i_private);
}
2330 
/* Read: current compressed bpp; write: force a specific DSC bpp. */
static const struct file_operations i915_dsc_bpp_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_bpp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_bpp_write
};
2339 
2340 /**
2341  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2342  * @connector: pointer to a registered drm_connector
2343  *
2344  * Cleanup will be done by drm_connector_unregister() through a call to
2345  * drm_debugfs_connector_remove().
2346  */
2347 void intel_connector_debugfs_add(struct intel_connector *intel_connector)
2348 {
2349 	struct drm_connector *connector = &intel_connector->base;
2350 	struct dentry *root = connector->debugfs_entry;
2351 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2352 
2353 	/* The connector must have been registered beforehands. */
2354 	if (!root)
2355 		return;
2356 
2357 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2358 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
2359 				    connector, &i915_panel_fops);
2360 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
2361 				    connector, &i915_psr_sink_status_fops);
2362 	}
2363 
2364 	if (HAS_PSR(dev_priv) &&
2365 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2366 		debugfs_create_file("i915_psr_status", 0444, root,
2367 				    connector, &i915_psr_status_fops);
2368 	}
2369 
2370 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2371 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2372 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2373 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
2374 				    connector, &i915_hdcp_sink_capability_fops);
2375 	}
2376 
2377 	if (DISPLAY_VER(dev_priv) >= 11 &&
2378 	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
2379 	    !to_intel_connector(connector)->mst_port) ||
2380 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
2381 		debugfs_create_file("i915_dsc_fec_support", 0644, root,
2382 				    connector, &i915_dsc_fec_support_fops);
2383 
2384 		debugfs_create_file("i915_dsc_bpp", 0644, root,
2385 				    connector, &i915_dsc_bpp_fops);
2386 	}
2387 
2388 	if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2389 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2390 	    connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2391 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2392 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
2393 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2394 				    connector, &i915_lpsp_capability_fops);
2395 }
2396 
2397 /**
2398  * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
2399  * @crtc: pointer to a drm_crtc
2400  *
2401  * Failure to add debugfs entries should generally be ignored.
2402  */
2403 void intel_crtc_debugfs_add(struct drm_crtc *crtc)
2404 {
2405 	if (crtc->debugfs_entry)
2406 		crtc_updates_add(crtc);
2407 }
2408