1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_display_debugfs.h"
11 #include "intel_display_power.h"
12 #include "intel_de.h"
13 #include "intel_display_types.h"
14 #include "intel_dmc.h"
15 #include "intel_dp.h"
16 #include "intel_fbc.h"
17 #include "intel_hdcp.h"
18 #include "intel_hdmi.h"
19 #include "intel_pm.h"
20 #include "intel_psr.h"
21 #include "intel_sideband.h"
22 #include "intel_sprite.h"
23 
24 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
25 {
26 	return to_i915(node->minor->dev);
27 }
28 
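/*
 * Frontbuffer tracking bitmasks; these drive the invalidate/flush handling
 * used by features such as FBC, PSR and DRRS.
 */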
29 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
30 {
31 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
32 
33 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
34 		   dev_priv->fb_tracking.busy_bits);
35 
36 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
37 		   dev_priv->fb_tracking.flip_bits);
38 
39 	return 0;
40 }
41 
42 static int i915_fbc_status(struct seq_file *m, void *unused)
43 {
44 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
45 	struct intel_fbc *fbc = &dev_priv->fbc;
46 	intel_wakeref_t wakeref;
47 
48 	if (!HAS_FBC(dev_priv))
49 		return -ENODEV;
50 
51 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
52 	mutex_lock(&fbc->lock);
53 
54 	if (intel_fbc_is_active(dev_priv))
55 		seq_puts(m, "FBC enabled\n");
56 	else
57 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
58 
59 	if (intel_fbc_is_active(dev_priv)) {
60 		u32 mask;
61 
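		/*
		 * A non-zero compression status mask means the hardware is
		 * actively compressing right now.
		 */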
62 		if (DISPLAY_VER(dev_priv) >= 8)
63 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
64 		else if (DISPLAY_VER(dev_priv) >= 7)
65 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
66 		else if (DISPLAY_VER(dev_priv) >= 5)
67 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
68 		else if (IS_G4X(dev_priv))
69 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
70 		else
71 			mask = intel_de_read(dev_priv, FBC_STATUS) &
72 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
73 
74 		seq_printf(m, "Compressing: %s\n", yesno(mask));
75 	}
76 
77 	mutex_unlock(&fbc->lock);
78 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
79 
80 	return 0;
81 }
82 
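/*
 * FBC false color tints compressed regions of the scanout, making it easy
 * to verify visually which parts of the screen are being compressed.
 */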
83 static int i915_fbc_false_color_get(void *data, u64 *val)
84 {
85 	struct drm_i915_private *dev_priv = data;
86 
87 	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
88 		return -ENODEV;
89 
90 	*val = dev_priv->fbc.false_color;
91 
92 	return 0;
93 }
94 
95 static int i915_fbc_false_color_set(void *data, u64 val)
96 {
97 	struct drm_i915_private *dev_priv = data;
98 	u32 reg;
99 
100 	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
101 		return -ENODEV;
102 
103 	mutex_lock(&dev_priv->fbc.lock);
104 
105 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
106 	dev_priv->fbc.false_color = val;
107 
108 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
109 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
110 
111 	mutex_unlock(&dev_priv->fbc.lock);
112 	return 0;
113 }
114 
115 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
116 			i915_fbc_false_color_get, i915_fbc_false_color_set,
117 			"%llu\n");
118 
119 static int i915_ips_status(struct seq_file *m, void *unused)
120 {
121 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
122 	intel_wakeref_t wakeref;
123 
124 	if (!HAS_IPS(dev_priv))
125 		return -ENODEV;
126 
127 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
128 
129 	seq_printf(m, "Enabled by kernel parameter: %s\n",
130 		   yesno(dev_priv->params.enable_ips));
131 
132 	if (DISPLAY_VER(dev_priv) >= 8) {
133 		seq_puts(m, "Currently: unknown\n");
134 	} else {
135 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
136 			seq_puts(m, "Currently: enabled\n");
137 		else
138 			seq_puts(m, "Currently: disabled\n");
139 	}
140 
141 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
142 
143 	return 0;
144 }
145 
146 static int i915_sr_status(struct seq_file *m, void *unused)
147 {
148 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
149 	intel_wakeref_t wakeref;
150 	bool sr_enabled = false;
151 
152 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
153 
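	/*
	 * The self-refresh enable bit lives in a different register
	 * depending on the platform.
	 */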
154 	if (DISPLAY_VER(dev_priv) >= 9)
155 		/* no global SR status; inspect per-plane WM */;
156 	else if (HAS_PCH_SPLIT(dev_priv))
157 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
158 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
159 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
160 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
161 	else if (IS_I915GM(dev_priv))
162 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
163 	else if (IS_PINEVIEW(dev_priv))
164 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
165 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
166 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
167 
168 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
169 
170 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
171 
172 	return 0;
173 }
174 
175 static int i915_opregion(struct seq_file *m, void *unused)
176 {
177 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
178 
179 	if (opregion->header)
180 		seq_write(m, opregion->header, OPREGION_SIZE);
181 
182 	return 0;
183 }
184 
185 static int i915_vbt(struct seq_file *m, void *unused)
186 {
187 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
188 
189 	if (opregion->vbt)
190 		seq_write(m, opregion->vbt, opregion->vbt_size);
191 
192 	return 0;
193 }
194 
195 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
196 {
197 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
198 	struct drm_device *dev = &dev_priv->drm;
199 	struct intel_framebuffer *fbdev_fb = NULL;
200 	struct drm_framebuffer *drm_fb;
201 
202 #ifdef CONFIG_DRM_FBDEV_EMULATION
203 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
204 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
205 
206 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
207 			   fbdev_fb->base.width,
208 			   fbdev_fb->base.height,
209 			   fbdev_fb->base.format->depth,
210 			   fbdev_fb->base.format->cpp[0] * 8,
211 			   fbdev_fb->base.modifier,
212 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
213 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
214 		seq_putc(m, '\n');
215 	}
216 #endif
217 
218 	mutex_lock(&dev->mode_config.fb_lock);
219 	drm_for_each_fb(drm_fb, dev) {
220 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
221 		if (fb == fbdev_fb)
222 			continue;
223 
224 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
225 			   fb->base.width,
226 			   fb->base.height,
227 			   fb->base.format->depth,
228 			   fb->base.format->cpp[0] * 8,
229 			   fb->base.modifier,
230 			   drm_framebuffer_read_refcount(&fb->base));
231 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
232 		seq_putc(m, '\n');
233 	}
234 	mutex_unlock(&dev->mode_config.fb_lock);
235 
236 	return 0;
237 }
238 
239 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
240 {
241 	u8 val;
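	/*
	 * Human-readable names for the DP_PSR_SINK_STATE_MASK values read
	 * from the sink DPCD.
	 */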
242 	static const char * const sink_status[] = {
243 		"inactive",
244 		"transition to active, capture and display",
245 		"active, display from RFB",
246 		"active, capture and display on sink device timings",
247 		"transition to inactive, capture and display, timing re-sync",
248 		"reserved",
249 		"reserved",
250 		"sink internal error",
251 	};
252 	struct drm_connector *connector = m->private;
253 	struct intel_dp *intel_dp =
254 		intel_attached_dp(to_intel_connector(connector));
255 	int ret;
256 
257 	if (!CAN_PSR(intel_dp)) {
258 		seq_puts(m, "PSR Unsupported\n");
259 		return -ENODEV;
260 	}
261 
262 	if (connector->status != connector_status_connected)
263 		return -ENODEV;
264 
265 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
266 
267 	if (ret == 1) {
268 		const char *str = "unknown";
269 
270 		val &= DP_PSR_SINK_STATE_MASK;
271 		if (val < ARRAY_SIZE(sink_status))
272 			str = sink_status[val];
273 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
274 	} else {
275 		return ret;
276 	}
277 
278 	return 0;
279 }
280 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
281 
282 static void
283 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
284 {
285 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
286 	const char *status = "unknown";
287 	u32 val, status_val;
288 
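	/*
	 * PSR1 and PSR2 report the live source state through different
	 * status registers, each with its own state encoding.
	 */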
289 	if (intel_dp->psr.psr2_enabled) {
290 		static const char * const live_status[] = {
291 			"IDLE",
292 			"CAPTURE",
293 			"CAPTURE_FS",
294 			"SLEEP",
295 			"BUFON_FW",
296 			"ML_UP",
297 			"SU_STANDBY",
298 			"FAST_SLEEP",
299 			"DEEP_SLEEP",
300 			"BUF_ON",
301 			"TG_ON"
302 		};
303 		val = intel_de_read(dev_priv,
304 				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
305 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
306 			      EDP_PSR2_STATUS_STATE_SHIFT;
307 		if (status_val < ARRAY_SIZE(live_status))
308 			status = live_status[status_val];
309 	} else {
310 		static const char * const live_status[] = {
311 			"IDLE",
312 			"SRDONACK",
313 			"SRDENT",
314 			"BUFOFF",
315 			"BUFON",
316 			"AUXACK",
317 			"SRDOFFACK",
318 			"SRDENT_ON",
319 		};
320 		val = intel_de_read(dev_priv,
321 				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
322 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
323 			      EDP_PSR_STATUS_STATE_SHIFT;
324 		if (status_val < ARRAY_SIZE(live_status))
325 			status = live_status[status_val];
326 	}
327 
328 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
329 }
330 
331 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
332 {
333 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
334 	struct intel_psr *psr = &intel_dp->psr;
335 	intel_wakeref_t wakeref;
336 	const char *status;
337 	bool enabled;
338 	u32 val;
339 
340 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
341 	if (psr->sink_support)
342 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
343 	seq_puts(m, "\n");
344 
345 	if (!psr->sink_support)
346 		return 0;
347 
348 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
349 	mutex_lock(&psr->lock);
350 
351 	if (psr->enabled)
352 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
353 	else
354 		status = "disabled";
355 	seq_printf(m, "PSR mode: %s\n", status);
356 
357 	if (!psr->enabled) {
358 		seq_printf(m, "PSR sink not reliable: %s\n",
359 			   yesno(psr->sink_not_reliable));
360 
361 		goto unlock;
362 	}
363 
364 	if (psr->psr2_enabled) {
365 		val = intel_de_read(dev_priv,
366 				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
367 		enabled = val & EDP_PSR2_ENABLE;
368 	} else {
369 		val = intel_de_read(dev_priv,
370 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
371 		enabled = val & EDP_PSR_ENABLE;
372 	}
373 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
374 		   enableddisabled(enabled), val);
375 	psr_source_status(intel_dp, m);
376 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
377 		   psr->busy_frontbuffer_bits);
378 
379 	/*
380 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
381 	 */
382 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
383 		val = intel_de_read(dev_priv,
384 				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
385 		val &= EDP_PSR_PERF_CNT_MASK;
386 		seq_printf(m, "Performance counter: %u\n", val);
387 	}
388 
389 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
390 		seq_printf(m, "Last attempted entry at: %lld\n",
391 			   psr->last_entry_attempt);
392 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
393 	}
394 
395 	if (psr->psr2_enabled) {
396 		u32 su_frames_val[3];
397 		int frame;
398 
399 		/*
400 		 * Read all 3 registers beforehand to minimize the chance of
401 		 * crossing a frame boundary between register reads.
402 		 */
403 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
404 			val = intel_de_read(dev_priv,
405 					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
406 			su_frames_val[frame / 3] = val;
407 		}
408 
409 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
410 
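		/*
		 * Each PSR2_SU_STATUS register packs the SU block counts of
		 * three consecutive frames.
		 */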
411 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
412 			u32 su_blocks;
413 
414 			su_blocks = su_frames_val[frame / 3] &
415 				    PSR2_SU_STATUS_MASK(frame);
416 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
417 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
418 		}
419 
420 		seq_printf(m, "PSR2 selective fetch: %s\n",
421 			   enableddisabled(psr->psr2_sel_fetch_enabled));
422 	}
423 
424 unlock:
425 	mutex_unlock(&psr->lock);
426 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
427 
428 	return 0;
429 }
430 
431 static int i915_edp_psr_status(struct seq_file *m, void *data)
432 {
433 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
434 	struct intel_dp *intel_dp = NULL;
435 	struct intel_encoder *encoder;
436 
437 	if (!HAS_PSR(dev_priv))
438 		return -ENODEV;
439 
440 	/* Find the first eDP output that supports PSR */
441 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
442 		intel_dp = enc_to_intel_dp(encoder);
443 		break;
444 	}
445 
446 	if (!intel_dp)
447 		return -ENODEV;
448 
449 	return intel_psr_status(m, intel_dp);
450 }
451 
452 static int
453 i915_edp_psr_debug_set(void *data, u64 val)
454 {
455 	struct drm_i915_private *dev_priv = data;
456 	struct intel_encoder *encoder;
457 	intel_wakeref_t wakeref;
458 	int ret = -ENODEV;
459 
460 	if (!HAS_PSR(dev_priv))
461 		return ret;
462 
463 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
464 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
465 
466 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
467 
468 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
469 
470 		// TODO: split to each transcoder's PSR debug state
471 		ret = intel_psr_debug_set(intel_dp, val);
472 
473 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
474 	}
475 
476 	return ret;
477 }
478 
479 static int
480 i915_edp_psr_debug_get(void *data, u64 *val)
481 {
482 	struct drm_i915_private *dev_priv = data;
483 	struct intel_encoder *encoder;
484 
485 	if (!HAS_PSR(dev_priv))
486 		return -ENODEV;
487 
488 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
489 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
490 
491 		// TODO: split to each transcoder's PSR debug state
492 		*val = READ_ONCE(intel_dp->psr.debug);
493 		return 0;
494 	}
495 
496 	return -ENODEV;
497 }
498 
499 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
500 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
501 			"%llu\n");
502 
503 static int i915_power_domain_info(struct seq_file *m, void *unused)
504 {
505 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
506 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
507 	int i;
508 
509 	mutex_lock(&power_domains->lock);
510 
511 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
512 	for (i = 0; i < power_domains->power_well_count; i++) {
513 		struct i915_power_well *power_well;
514 		enum intel_display_power_domain power_domain;
515 
516 		power_well = &power_domains->power_wells[i];
517 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
518 			   power_well->count);
519 
520 		for_each_power_domain(power_domain, power_well->desc->domains)
521 			seq_printf(m, "  %-23s %d\n",
522 				 intel_display_power_domain_str(power_domain),
523 				 power_domains->domain_use_count[power_domain]);
524 	}
525 
526 	mutex_unlock(&power_domains->lock);
527 
528 	return 0;
529 }
530 
531 static int i915_dmc_info(struct seq_file *m, void *unused)
532 {
533 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
534 	intel_wakeref_t wakeref;
535 	struct intel_dmc *dmc;
536 	i915_reg_t dc5_reg, dc6_reg = {};
537 
538 	if (!HAS_DMC(dev_priv))
539 		return -ENODEV;
540 
541 	dmc = &dev_priv->dmc;
542 
543 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
544 
545 	seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
546 	seq_printf(m, "path: %s\n", dmc->fw_path);
547 	seq_printf(m, "Pipe A fw support: %s\n",
548 		   yesno(GRAPHICS_VER(dev_priv) >= 12));
549 	seq_printf(m, "Pipe A fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEA].payload));
550 	seq_printf(m, "Pipe B fw support: %s\n", yesno(IS_ALDERLAKE_P(dev_priv)));
551 	seq_printf(m, "Pipe B fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEB].payload));
552 
553 	if (!intel_dmc_has_payload(dev_priv))
554 		goto out;
555 
556 	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
557 		   DMC_VERSION_MINOR(dmc->version));
558 
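	/*
	 * The DC5/DC6 entry counters live in different debug registers
	 * depending on the platform.
	 */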
559 	if (DISPLAY_VER(dev_priv) >= 12) {
560 		if (IS_DGFX(dev_priv)) {
561 			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
562 		} else {
563 			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
564 			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
565 		}
566 
567 		/*
568 		 * NOTE: DMC_DEBUG3 is a general purpose reg.
569 		 * According to B.Specs:49196 the DMC f/w reuses the DC5/6
570 		 * counter reg for DC3CO debugging and validation, but the
571 		 * TGL DMC f/w uses the DMC_DEBUG3 reg as the DC3CO counter.
572 		 */
573 		seq_printf(m, "DC3CO count: %d\n",
574 			   intel_de_read(dev_priv, DMC_DEBUG3));
575 	} else {
576 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
577 						 SKL_DMC_DC3_DC5_COUNT;
578 		if (!IS_GEMINILAKE(dev_priv) && !IS_BROXTON(dev_priv))
579 			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
580 	}
581 
582 	seq_printf(m, "DC3 -> DC5 count: %d\n",
583 		   intel_de_read(dev_priv, dc5_reg));
584 	if (dc6_reg.reg)
585 		seq_printf(m, "DC5 -> DC6 count: %d\n",
586 			   intel_de_read(dev_priv, dc6_reg));
587 
588 out:
589 	seq_printf(m, "program base: 0x%08x\n",
590 		   intel_de_read(dev_priv, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
591 	seq_printf(m, "ssp base: 0x%08x\n",
592 		   intel_de_read(dev_priv, DMC_SSP_BASE));
593 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));
594 
595 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
596 
597 	return 0;
598 }
599 
600 static void intel_seq_print_mode(struct seq_file *m, int tabs,
601 				 const struct drm_display_mode *mode)
602 {
603 	int i;
604 
605 	for (i = 0; i < tabs; i++)
606 		seq_putc(m, '\t');
607 
608 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
609 }
610 
611 static void intel_encoder_info(struct seq_file *m,
612 			       struct intel_crtc *crtc,
613 			       struct intel_encoder *encoder)
614 {
615 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
616 	struct drm_connector_list_iter conn_iter;
617 	struct drm_connector *connector;
618 
619 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
620 		   encoder->base.base.id, encoder->base.name);
621 
622 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
623 	drm_for_each_connector_iter(connector, &conn_iter) {
624 		const struct drm_connector_state *conn_state =
625 			connector->state;
626 
627 		if (conn_state->best_encoder != &encoder->base)
628 			continue;
629 
630 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
631 			   connector->base.id, connector->name);
632 	}
633 	drm_connector_list_iter_end(&conn_iter);
634 }
635 
636 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
637 {
638 	const struct drm_display_mode *mode = panel->fixed_mode;
639 
640 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
641 }
642 
643 static void intel_hdcp_info(struct seq_file *m,
644 			    struct intel_connector *intel_connector)
645 {
646 	bool hdcp_cap, hdcp2_cap;
647 
648 	if (!intel_connector->hdcp.shim) {
649 		seq_puts(m, "No Connector Support");
650 		goto out;
651 	}
652 
653 	hdcp_cap = intel_hdcp_capable(intel_connector);
654 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
655 
656 	if (hdcp_cap)
657 		seq_puts(m, "HDCP1.4 ");
658 	if (hdcp2_cap)
659 		seq_puts(m, "HDCP2.2 ");
660 
661 	if (!hdcp_cap && !hdcp2_cap)
662 		seq_puts(m, "None");
663 
664 out:
665 	seq_puts(m, "\n");
666 }
667 
668 static void intel_dp_info(struct seq_file *m,
669 			  struct intel_connector *intel_connector)
670 {
671 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
672 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
673 	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
674 
675 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
676 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
677 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
678 		intel_panel_info(m, &intel_connector->panel);
679 
680 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
681 				edid ? edid->data : NULL, &intel_dp->aux);
682 }
683 
684 static void intel_dp_mst_info(struct seq_file *m,
685 			      struct intel_connector *intel_connector)
686 {
687 	bool has_audio = intel_connector->port->has_audio;
688 
689 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
690 }
691 
692 static void intel_hdmi_info(struct seq_file *m,
693 			    struct intel_connector *intel_connector)
694 {
695 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
696 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
697 
698 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
699 }
700 
701 static void intel_lvds_info(struct seq_file *m,
702 			    struct intel_connector *intel_connector)
703 {
704 	intel_panel_info(m, &intel_connector->panel);
705 }
706 
707 static void intel_connector_info(struct seq_file *m,
708 				 struct drm_connector *connector)
709 {
710 	struct intel_connector *intel_connector = to_intel_connector(connector);
711 	const struct drm_connector_state *conn_state = connector->state;
712 	struct intel_encoder *encoder =
713 		to_intel_encoder(conn_state->best_encoder);
714 	const struct drm_display_mode *mode;
715 
716 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
717 		   connector->base.id, connector->name,
718 		   drm_get_connector_status_name(connector->status));
719 
720 	if (connector->status == connector_status_disconnected)
721 		return;
722 
723 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
724 		   connector->display_info.width_mm,
725 		   connector->display_info.height_mm);
726 	seq_printf(m, "\tsubpixel order: %s\n",
727 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
728 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
729 
730 	if (!encoder)
731 		return;
732 
733 	switch (connector->connector_type) {
734 	case DRM_MODE_CONNECTOR_DisplayPort:
735 	case DRM_MODE_CONNECTOR_eDP:
736 		if (encoder->type == INTEL_OUTPUT_DP_MST)
737 			intel_dp_mst_info(m, intel_connector);
738 		else
739 			intel_dp_info(m, intel_connector);
740 		break;
741 	case DRM_MODE_CONNECTOR_LVDS:
742 		if (encoder->type == INTEL_OUTPUT_LVDS)
743 			intel_lvds_info(m, intel_connector);
744 		break;
745 	case DRM_MODE_CONNECTOR_HDMIA:
746 		if (encoder->type == INTEL_OUTPUT_HDMI ||
747 		    encoder->type == INTEL_OUTPUT_DDI)
748 			intel_hdmi_info(m, intel_connector);
749 		break;
750 	default:
751 		break;
752 	}
753 
754 	seq_puts(m, "\tHDCP version: ");
755 	intel_hdcp_info(m, intel_connector);
756 
757 	seq_puts(m, "\tmodes:\n");
758 	list_for_each_entry(mode, &connector->modes, head)
759 		intel_seq_print_mode(m, 2, mode);
760 }
761 
762 static const char *plane_type(enum drm_plane_type type)
763 {
764 	switch (type) {
765 	case DRM_PLANE_TYPE_OVERLAY:
766 		return "OVL";
767 	case DRM_PLANE_TYPE_PRIMARY:
768 		return "PRI";
769 	case DRM_PLANE_TYPE_CURSOR:
770 		return "CUR";
771 	/*
772 	 * Deliberately omitting default: to generate compiler warnings
773 	 * when a new drm_plane_type gets added.
774 	 */
775 	}
776 
777 	return "unknown";
778 }
779 
780 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
781 {
782 	/*
783 	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
784 	 * but print them all anyway to make misused values easy to spot.
785 	 */
786 	snprintf(buf, bufsize,
787 		 "%s%s%s%s%s%s(0x%08x)",
788 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
789 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
790 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
791 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
792 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
793 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
794 		 rotation);
795 }
796 
797 static const char *plane_visibility(const struct intel_plane_state *plane_state)
798 {
799 	if (plane_state->uapi.visible)
800 		return "visible";
801 
802 	if (plane_state->planar_slave)
803 		return "planar-slave";
804 
805 	return "hidden";
806 }
807 
808 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
809 {
810 	const struct intel_plane_state *plane_state =
811 		to_intel_plane_state(plane->base.state);
812 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
813 	struct drm_rect src, dst;
814 	char rot_str[48];
815 
816 	src = drm_plane_state_src(&plane_state->uapi);
817 	dst = drm_plane_state_dest(&plane_state->uapi);
818 
819 	plane_rotation(rot_str, sizeof(rot_str),
820 		       plane_state->uapi.rotation);
821 
822 	seq_puts(m, "\t\tuapi: [FB:");
823 	if (fb)
824 		seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
825 			   &fb->format->format, fb->modifier, fb->width,
826 			   fb->height);
827 	else
828 		seq_puts(m, "0] n/a,0x0,0x0,");
829 	seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
830 		   ", rotation=%s\n", plane_visibility(plane_state),
831 		   DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);
832 
833 	if (plane_state->planar_linked_plane)
834 		seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
835 			   plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
836 			   plane_state->planar_slave ? "slave" : "master");
837 }
838 
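/*
 * The hw state can differ from the uapi state above, e.g. when the plane
 * state is inherited from a planar YUV or bigjoiner master.
 */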
839 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
840 {
841 	const struct intel_plane_state *plane_state =
842 		to_intel_plane_state(plane->base.state);
843 	const struct drm_framebuffer *fb = plane_state->hw.fb;
844 	char rot_str[48];
845 
846 	if (!fb)
847 		return;
848 
849 	plane_rotation(rot_str, sizeof(rot_str),
850 		       plane_state->hw.rotation);
851 
852 	seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
853 		   DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
854 		   fb->base.id, &fb->format->format,
855 		   fb->modifier, fb->width, fb->height,
856 		   yesno(plane_state->uapi.visible),
857 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
858 		   DRM_RECT_ARG(&plane_state->uapi.dst),
859 		   rot_str);
860 }
861 
862 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
863 {
864 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
865 	struct intel_plane *plane;
866 
867 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
868 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
869 			   plane->base.base.id, plane->base.name,
870 			   plane_type(plane->base.type));
871 		intel_plane_uapi_info(m, plane);
872 		intel_plane_hw_info(m, plane);
873 	}
874 }
875 
876 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
877 {
878 	const struct intel_crtc_state *crtc_state =
879 		to_intel_crtc_state(crtc->base.state);
880 	int num_scalers = crtc->num_scalers;
881 	int i;
882 
883 	/* Not all platforms have a scaler */
884 	if (num_scalers) {
885 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
886 			   num_scalers,
887 			   crtc_state->scaler_state.scaler_users,
888 			   crtc_state->scaler_state.scaler_id);
889 
890 		for (i = 0; i < num_scalers; i++) {
891 			const struct intel_scaler *sc =
892 				&crtc_state->scaler_state.scalers[i];
893 
894 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
895 				   i, yesno(sc->in_use), sc->mode);
896 		}
897 		seq_puts(m, "\n");
898 	} else {
899 		seq_puts(m, "\tNo scalers available on this platform\n");
900 	}
901 }
902 
903 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
904 static void crtc_updates_info(struct seq_file *m,
905 			      struct intel_crtc *crtc,
906 			      const char *hdr)
907 {
908 	u64 count;
909 	int row;
910 
911 	count = 0;
912 	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
913 		count += crtc->debug.vbl.times[row];
914 	seq_printf(m, "%sUpdates: %llu\n", hdr, count);
915 	if (!count)
916 		return;
917 
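	/*
	 * Print a histogram of update durations: each row is roughly a
	 * power-of-two time bucket and the bar length is the log2 of the
	 * number of updates that landed in that bucket.
	 */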
918 	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
919 		char columns[80] = "       |";
920 		unsigned int x;
921 
922 		if (row & 1) {
923 			const char *units;
924 
925 			if (row > 10) {
926 				x = 1000000;
927 				units = "ms";
928 			} else {
929 				x = 1000;
930 				units = "us";
931 			}
932 
933 			snprintf(columns, sizeof(columns), "%4ld%s |",
934 				 DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
935 		}
936 
937 		if (crtc->debug.vbl.times[row]) {
938 			x = ilog2(crtc->debug.vbl.times[row]);
939 			memset(columns + 8, '*', x);
940 			columns[8 + x] = '\0';
941 		}
942 
943 		seq_printf(m, "%s%s\n", hdr, columns);
944 	}
945 
946 	seq_printf(m, "%sMin update: %lluns\n",
947 		   hdr, crtc->debug.vbl.min);
948 	seq_printf(m, "%sMax update: %lluns\n",
949 		   hdr, crtc->debug.vbl.max);
950 	seq_printf(m, "%sAverage update: %lluns\n",
951 		   hdr, div64_u64(crtc->debug.vbl.sum,  count));
952 	seq_printf(m, "%sOverruns > %uus: %u\n",
953 		   hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
954 }
955 
956 static int crtc_updates_show(struct seq_file *m, void *data)
957 {
958 	crtc_updates_info(m, m->private, "");
959 	return 0;
960 }
961 
962 static int crtc_updates_open(struct inode *inode, struct file *file)
963 {
964 	return single_open(file, crtc_updates_show, inode->i_private);
965 }
966 
967 static ssize_t crtc_updates_write(struct file *file,
968 				  const char __user *ubuf,
969 				  size_t len, loff_t *offp)
970 {
971 	struct seq_file *m = file->private_data;
972 	struct intel_crtc *crtc = m->private;
973 
974 	/* May race with an update. Meh. */
975 	memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
976 
977 	return len;
978 }
979 
980 static const struct file_operations crtc_updates_fops = {
981 	.owner = THIS_MODULE,
982 	.open = crtc_updates_open,
983 	.read = seq_read,
984 	.llseek = seq_lseek,
985 	.release = single_release,
986 	.write = crtc_updates_write
987 };
988 
989 static void crtc_updates_add(struct drm_crtc *crtc)
990 {
991 	debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
992 			    to_intel_crtc(crtc), &crtc_updates_fops);
993 }
994 
995 #else
996 static void crtc_updates_info(struct seq_file *m,
997 			      struct intel_crtc *crtc,
998 			      const char *hdr)
999 {
1000 }
1001 
1002 static void crtc_updates_add(struct drm_crtc *crtc)
1003 {
1004 }
1005 #endif
1006 
1007 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
1008 {
1009 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1010 	const struct intel_crtc_state *crtc_state =
1011 		to_intel_crtc_state(crtc->base.state);
1012 	struct intel_encoder *encoder;
1013 
1014 	seq_printf(m, "[CRTC:%d:%s]:\n",
1015 		   crtc->base.base.id, crtc->base.name);
1016 
1017 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
1018 		   yesno(crtc_state->uapi.enable),
1019 		   yesno(crtc_state->uapi.active),
1020 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
1021 
1022 	if (crtc_state->hw.enable) {
1023 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
1024 			   yesno(crtc_state->hw.active),
1025 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
1026 
1027 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
1028 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
1029 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
1030 
1031 		intel_scaler_info(m, crtc);
1032 	}
1033 
1034 	if (crtc_state->bigjoiner)
1035 		seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
1036 			   crtc_state->bigjoiner_linked_crtc->base.base.id,
1037 			   crtc_state->bigjoiner_linked_crtc->base.name,
1038 			   crtc_state->bigjoiner_slave ? "slave" : "master");
1039 
1040 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
1041 				    crtc_state->uapi.encoder_mask)
1042 		intel_encoder_info(m, crtc, encoder);
1043 
1044 	intel_plane_info(m, crtc);
1045 
1046 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
1047 		   yesno(!crtc->cpu_fifo_underrun_disabled),
1048 		   yesno(!crtc->pch_fifo_underrun_disabled));
1049 
1050 	crtc_updates_info(m, crtc, "\t");
1051 }
1052 
1053 static int i915_display_info(struct seq_file *m, void *unused)
1054 {
1055 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1056 	struct drm_device *dev = &dev_priv->drm;
1057 	struct intel_crtc *crtc;
1058 	struct drm_connector *connector;
1059 	struct drm_connector_list_iter conn_iter;
1060 	intel_wakeref_t wakeref;
1061 
1062 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1063 
1064 	drm_modeset_lock_all(dev);
1065 
1066 	seq_puts(m, "CRTC info\n");
1067 	seq_puts(m, "---------\n");
1068 	for_each_intel_crtc(dev, crtc)
1069 		intel_crtc_info(m, crtc);
1070 
1071 	seq_puts(m, "\n");
1072 	seq_puts(m, "Connector info\n");
1073 	seq_puts(m, "--------------\n");
1074 	drm_connector_list_iter_begin(dev, &conn_iter);
1075 	drm_for_each_connector_iter(connector, &conn_iter)
1076 		intel_connector_info(m, connector);
1077 	drm_connector_list_iter_end(&conn_iter);
1078 
1079 	drm_modeset_unlock_all(dev);
1080 
1081 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1082 
1083 	return 0;
1084 }
1085 
1086 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
1087 {
1088 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1089 	struct drm_device *dev = &dev_priv->drm;
1090 	int i;
1091 
1092 	drm_modeset_lock_all(dev);
1093 
1094 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
1095 		   dev_priv->dpll.ref_clks.nssc,
1096 		   dev_priv->dpll.ref_clks.ssc);
1097 
1098 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
1099 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
1100 
1101 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
1102 			   pll->info->id);
1103 		seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
1104 			   pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
1105 		seq_puts(m, " tracked hardware state:\n");
1106 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
1107 		seq_printf(m, " dpll_md: 0x%08x\n",
1108 			   pll->state.hw_state.dpll_md);
1109 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
1110 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
1111 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
1112 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
1113 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
1114 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
1115 			   pll->state.hw_state.mg_refclkin_ctl);
1116 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
1117 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
1118 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
1119 			   pll->state.hw_state.mg_clktop2_hsclkctl);
1120 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
1121 			   pll->state.hw_state.mg_pll_div0);
1122 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
1123 			   pll->state.hw_state.mg_pll_div1);
1124 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
1125 			   pll->state.hw_state.mg_pll_lf);
1126 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
1127 			   pll->state.hw_state.mg_pll_frac_lock);
1128 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
1129 			   pll->state.hw_state.mg_pll_ssc);
1130 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
1131 			   pll->state.hw_state.mg_pll_bias);
1132 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
1133 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
1134 	}
1135 	drm_modeset_unlock_all(dev);
1136 
1137 	return 0;
1138 }
1139 
1140 static int i915_ipc_status_show(struct seq_file *m, void *data)
1141 {
1142 	struct drm_i915_private *dev_priv = m->private;
1143 
1144 	seq_printf(m, "Isochronous Priority Control: %s\n",
1145 			yesno(dev_priv->ipc_enabled));
1146 	return 0;
1147 }
1148 
1149 static int i915_ipc_status_open(struct inode *inode, struct file *file)
1150 {
1151 	struct drm_i915_private *dev_priv = inode->i_private;
1152 
1153 	if (!HAS_IPC(dev_priv))
1154 		return -ENODEV;
1155 
1156 	return single_open(file, i915_ipc_status_show, dev_priv);
1157 }
1158 
1159 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
1160 				     size_t len, loff_t *offp)
1161 {
1162 	struct seq_file *m = file->private_data;
1163 	struct drm_i915_private *dev_priv = m->private;
1164 	intel_wakeref_t wakeref;
1165 	bool enable;
1166 	int ret;
1167 
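	/* Parse a boolean; watermarks are only recomputed on the next commit. */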
1168 	ret = kstrtobool_from_user(ubuf, len, &enable);
1169 	if (ret < 0)
1170 		return ret;
1171 
1172 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1173 		if (!dev_priv->ipc_enabled && enable)
1174 			drm_info(&dev_priv->drm,
1175 				 "Enabling IPC: WM will be proper only after next commit\n");
1176 		dev_priv->ipc_enabled = enable;
1177 		intel_enable_ipc(dev_priv);
1178 	}
1179 
1180 	return len;
1181 }
1182 
1183 static const struct file_operations i915_ipc_status_fops = {
1184 	.owner = THIS_MODULE,
1185 	.open = i915_ipc_status_open,
1186 	.read = seq_read,
1187 	.llseek = seq_lseek,
1188 	.release = single_release,
1189 	.write = i915_ipc_status_write
1190 };
1191 
1192 static int i915_ddb_info(struct seq_file *m, void *unused)
1193 {
1194 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1195 	struct drm_device *dev = &dev_priv->drm;
1196 	struct skl_ddb_entry *entry;
1197 	struct intel_crtc *crtc;
1198 
1199 	if (DISPLAY_VER(dev_priv) < 9)
1200 		return -ENODEV;
1201 
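	/*
	 * Dump the DDB (display data buffer) allocation of each plane from
	 * the committed crtc states.
	 */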
1202 	drm_modeset_lock_all(dev);
1203 
1204 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1205 
1206 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1207 		struct intel_crtc_state *crtc_state =
1208 			to_intel_crtc_state(crtc->base.state);
1209 		enum pipe pipe = crtc->pipe;
1210 		enum plane_id plane_id;
1211 
1212 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1213 
1214 		for_each_plane_id_on_crtc(crtc, plane_id) {
1215 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1216 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1217 				   entry->start, entry->end,
1218 				   skl_ddb_entry_size(entry));
1219 		}
1220 
1221 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1222 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1223 			   entry->end, skl_ddb_entry_size(entry));
1224 	}
1225 
1226 	drm_modeset_unlock_all(dev);
1227 
1228 	return 0;
1229 }
1230 
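/*
 * Print the DRRS (dynamic refresh rate switching) state for the eDP panel
 * driven by this crtc, if any.
 */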
1231 static void drrs_status_per_crtc(struct seq_file *m,
1232 				 struct drm_device *dev,
1233 				 struct intel_crtc *crtc)
1234 {
1235 	struct drm_i915_private *dev_priv = to_i915(dev);
1236 	struct i915_drrs *drrs = &dev_priv->drrs;
1237 	int vrefresh = 0;
1238 	struct drm_connector *connector;
1239 	struct drm_connector_list_iter conn_iter;
1240 
1241 	drm_connector_list_iter_begin(dev, &conn_iter);
1242 	drm_for_each_connector_iter(connector, &conn_iter) {
1243 		bool supported = false;
1244 
1245 		if (connector->state->crtc != &crtc->base)
1246 			continue;
1247 
1248 		seq_printf(m, "%s:\n", connector->name);
1249 
1250 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
1251 		    drrs->type == SEAMLESS_DRRS_SUPPORT)
1252 			supported = true;
1253 
1254 		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
1255 	}
1256 	drm_connector_list_iter_end(&conn_iter);
1257 
1258 	seq_puts(m, "\n");
1259 
1260 	if (to_intel_crtc_state(crtc->base.state)->has_drrs) {
1261 		struct intel_panel *panel;
1262 
1263 		mutex_lock(&drrs->mutex);
1264 		/* DRRS Supported */
1265 		seq_puts(m, "\tDRRS Enabled: Yes\n");
1266 
1267 		/* disable_drrs() will make drrs->dp NULL */
1268 		if (!drrs->dp) {
1269 			seq_puts(m, "Idleness DRRS: Disabled\n");
1270 			mutex_unlock(&drrs->mutex);
1271 			return;
1272 		}
1273 
1274 		panel = &drrs->dp->attached_connector->panel;
1275 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1276 					drrs->busy_frontbuffer_bits);
1277 
1278 		seq_puts(m, "\n\t\t");
1279 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1280 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1281 			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1282 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1283 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1284 			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1285 		} else {
1286 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1287 						drrs->refresh_rate_type);
1288 			mutex_unlock(&drrs->mutex);
1289 			return;
1290 		}
1291 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1292 
1293 		seq_puts(m, "\n\t\t");
1294 		mutex_unlock(&drrs->mutex);
1295 	} else {
1296 		/* DRRS not supported. Print the VBT parameter. */
1297 		seq_puts(m, "\tDRRS Enabled: No");
1298 	}
1299 	seq_puts(m, "\n");
1300 }
1301 
1302 static int i915_drrs_status(struct seq_file *m, void *unused)
1303 {
1304 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1305 	struct drm_device *dev = &dev_priv->drm;
1306 	struct intel_crtc *crtc;
1307 	int active_crtc_cnt = 0;
1308 
1309 	drm_modeset_lock_all(dev);
1310 	for_each_intel_crtc(dev, crtc) {
1311 		if (crtc->base.state->active) {
1312 			active_crtc_cnt++;
1313 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1314 
1315 			drrs_status_per_crtc(m, dev, crtc);
1316 		}
1317 	}
1318 	drm_modeset_unlock_all(dev);
1319 
1320 	if (!active_crtc_cnt)
1321 		seq_puts(m, "No active crtc found\n");
1322 
1323 	return 0;
1324 }
1325 
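/*
 * LPSP (low power single pipe) is only possible while the platform's wider
 * display power well is shut down, so report it as enabled whenever that
 * power well is off.
 */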
1326 #define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
1327 				seq_puts(m, "LPSP: disabled\n"))
1328 
1329 static bool
1330 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1331 			      enum i915_power_well_id power_well_id)
1332 {
1333 	intel_wakeref_t wakeref;
1334 	bool is_enabled;
1335 
1336 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1337 	is_enabled = intel_display_power_well_is_enabled(i915,
1338 							 power_well_id);
1339 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1340 
1341 	return is_enabled;
1342 }
1343 
1344 static int i915_lpsp_status(struct seq_file *m, void *unused)
1345 {
1346 	struct drm_i915_private *i915 = node_to_i915(m->private);
1347 
1348 	if (DISPLAY_VER(i915) >= 13) {
1349 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915,
1350 							   SKL_DISP_PW_2));
1351 		return 0;
1352 	}
1353 
1354 	switch (DISPLAY_VER(i915)) {
1355 	case 12:
1356 	case 11:
1357 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1358 		break;
1359 	case 10:
1360 	case 9:
1361 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1362 		break;
1363 	default:
1364 		/*
1365 		 * Apart from HASWELL/BROADWELL, no other legacy platform
1366 		 * supports LPSP.
1367 		 */
1368 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1369 			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1370 		else
1371 			seq_puts(m, "LPSP: not supported\n");
1372 	}
1373 
1374 	return 0;
1375 }
1376 
1377 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1378 {
1379 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1380 	struct drm_device *dev = &dev_priv->drm;
1381 	struct intel_encoder *intel_encoder;
1382 	struct intel_digital_port *dig_port;
1383 	struct drm_connector *connector;
1384 	struct drm_connector_list_iter conn_iter;
1385 
1386 	drm_connector_list_iter_begin(dev, &conn_iter);
1387 	drm_for_each_connector_iter(connector, &conn_iter) {
1388 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1389 			continue;
1390 
1391 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1392 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1393 			continue;
1394 
1395 		dig_port = enc_to_dig_port(intel_encoder);
1396 		if (!dig_port->dp.can_mst)
1397 			continue;
1398 
1399 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1400 			   dig_port->base.base.base.id,
1401 			   dig_port->base.base.name);
1402 		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1403 	}
1404 	drm_connector_list_iter_end(&conn_iter);
1405 
1406 	return 0;
1407 }
1408 
1409 static ssize_t i915_displayport_test_active_write(struct file *file,
1410 						  const char __user *ubuf,
1411 						  size_t len, loff_t *offp)
1412 {
1413 	char *input_buffer;
1414 	int status = 0;
1415 	struct drm_device *dev;
1416 	struct drm_connector *connector;
1417 	struct drm_connector_list_iter conn_iter;
1418 	struct intel_dp *intel_dp;
1419 	int val = 0;
1420 
1421 	dev = ((struct seq_file *)file->private_data)->private;
1422 
1423 	if (len == 0)
1424 		return 0;
1425 
1426 	input_buffer = memdup_user_nul(ubuf, len);
1427 	if (IS_ERR(input_buffer))
1428 		return PTR_ERR(input_buffer);
1429 
1430 	drm_dbg(&to_i915(dev)->drm,
1431 		"Copied %d bytes from user\n", (unsigned int)len);
1432 
1433 	drm_connector_list_iter_begin(dev, &conn_iter);
1434 	drm_for_each_connector_iter(connector, &conn_iter) {
1435 		struct intel_encoder *encoder;
1436 
1437 		if (connector->connector_type !=
1438 		    DRM_MODE_CONNECTOR_DisplayPort)
1439 			continue;
1440 
1441 		encoder = to_intel_encoder(connector->encoder);
1442 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1443 			continue;
1444 
1445 		if (encoder && connector->status == connector_status_connected) {
1446 			intel_dp = enc_to_intel_dp(encoder);
1447 			status = kstrtoint(input_buffer, 10, &val);
1448 			if (status < 0)
1449 				break;
1450 			drm_dbg(&to_i915(dev)->drm,
1451 				"Got %d for test active\n", val);
1452 			/* To prevent erroneous activation of the compliance
1453 			 * testing code, only accept an actual value of 1 here
1454 			 */
1455 			if (val == 1)
1456 				intel_dp->compliance.test_active = true;
1457 			else
1458 				intel_dp->compliance.test_active = false;
1459 		}
1460 	}
1461 	drm_connector_list_iter_end(&conn_iter);
1462 	kfree(input_buffer);
1463 	if (status < 0)
1464 		return status;
1465 
1466 	*offp += len;
1467 	return len;
1468 }
1469 
1470 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1471 {
1472 	struct drm_i915_private *dev_priv = m->private;
1473 	struct drm_device *dev = &dev_priv->drm;
1474 	struct drm_connector *connector;
1475 	struct drm_connector_list_iter conn_iter;
1476 	struct intel_dp *intel_dp;
1477 
1478 	drm_connector_list_iter_begin(dev, &conn_iter);
1479 	drm_for_each_connector_iter(connector, &conn_iter) {
1480 		struct intel_encoder *encoder;
1481 
1482 		if (connector->connector_type !=
1483 		    DRM_MODE_CONNECTOR_DisplayPort)
1484 			continue;
1485 
1486 		encoder = to_intel_encoder(connector->encoder);
1487 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1488 			continue;
1489 
1490 		if (encoder && connector->status == connector_status_connected) {
1491 			intel_dp = enc_to_intel_dp(encoder);
1492 			if (intel_dp->compliance.test_active)
1493 				seq_puts(m, "1");
1494 			else
1495 				seq_puts(m, "0");
1496 		} else
1497 			seq_puts(m, "0");
1498 	}
1499 	drm_connector_list_iter_end(&conn_iter);
1500 
1501 	return 0;
1502 }
1503 
1504 static int i915_displayport_test_active_open(struct inode *inode,
1505 					     struct file *file)
1506 {
1507 	return single_open(file, i915_displayport_test_active_show,
1508 			   inode->i_private);
1509 }
1510 
1511 static const struct file_operations i915_displayport_test_active_fops = {
1512 	.owner = THIS_MODULE,
1513 	.open = i915_displayport_test_active_open,
1514 	.read = seq_read,
1515 	.llseek = seq_lseek,
1516 	.release = single_release,
1517 	.write = i915_displayport_test_active_write
1518 };
1519 
1520 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1521 {
1522 	struct drm_i915_private *dev_priv = m->private;
1523 	struct drm_device *dev = &dev_priv->drm;
1524 	struct drm_connector *connector;
1525 	struct drm_connector_list_iter conn_iter;
1526 	struct intel_dp *intel_dp;
1527 
1528 	drm_connector_list_iter_begin(dev, &conn_iter);
1529 	drm_for_each_connector_iter(connector, &conn_iter) {
1530 		struct intel_encoder *encoder;
1531 
1532 		if (connector->connector_type !=
1533 		    DRM_MODE_CONNECTOR_DisplayPort)
1534 			continue;
1535 
1536 		encoder = to_intel_encoder(connector->encoder);
1537 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1538 			continue;
1539 
1540 		if (encoder && connector->status == connector_status_connected) {
1541 			intel_dp = enc_to_intel_dp(encoder);
1542 			if (intel_dp->compliance.test_type ==
1543 			    DP_TEST_LINK_EDID_READ)
1544 				seq_printf(m, "%lx",
1545 					   intel_dp->compliance.test_data.edid);
1546 			else if (intel_dp->compliance.test_type ==
1547 				 DP_TEST_LINK_VIDEO_PATTERN) {
1548 				seq_printf(m, "hdisplay: %d\n",
1549 					   intel_dp->compliance.test_data.hdisplay);
1550 				seq_printf(m, "vdisplay: %d\n",
1551 					   intel_dp->compliance.test_data.vdisplay);
1552 				seq_printf(m, "bpc: %u\n",
1553 					   intel_dp->compliance.test_data.bpc);
1554 			} else if (intel_dp->compliance.test_type ==
1555 				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1556 				seq_printf(m, "pattern: %d\n",
1557 					   intel_dp->compliance.test_data.phytest.phy_pattern);
1558 				seq_printf(m, "Number of lanes: %d\n",
1559 					   intel_dp->compliance.test_data.phytest.num_lanes);
1560 				seq_printf(m, "Link Rate: %d\n",
1561 					   intel_dp->compliance.test_data.phytest.link_rate);
1562 				seq_printf(m, "level: %02x\n",
1563 					   intel_dp->train_set[0]);
1564 			}
1565 		} else
1566 			seq_puts(m, "0");
1567 	}
1568 	drm_connector_list_iter_end(&conn_iter);
1569 
1570 	return 0;
1571 }
1572 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1573 
1574 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1575 {
1576 	struct drm_i915_private *dev_priv = m->private;
1577 	struct drm_device *dev = &dev_priv->drm;
1578 	struct drm_connector *connector;
1579 	struct drm_connector_list_iter conn_iter;
1580 	struct intel_dp *intel_dp;
1581 
1582 	drm_connector_list_iter_begin(dev, &conn_iter);
1583 	drm_for_each_connector_iter(connector, &conn_iter) {
1584 		struct intel_encoder *encoder;
1585 
1586 		if (connector->connector_type !=
1587 		    DRM_MODE_CONNECTOR_DisplayPort)
1588 			continue;
1589 
1590 		encoder = to_intel_encoder(connector->encoder);
1591 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1592 			continue;
1593 
1594 		if (encoder && connector->status == connector_status_connected) {
1595 			intel_dp = enc_to_intel_dp(encoder);
1596 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1597 		} else
1598 			seq_puts(m, "0");
1599 	}
1600 	drm_connector_list_iter_end(&conn_iter);
1601 
1602 	return 0;
1603 }
1604 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1605 
1606 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1607 {
1608 	struct drm_i915_private *dev_priv = m->private;
1609 	struct drm_device *dev = &dev_priv->drm;
1610 	int level;
1611 	int num_levels;
1612 
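	/* The number of exposed watermark levels depends on the platform. */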
1613 	if (IS_CHERRYVIEW(dev_priv))
1614 		num_levels = 3;
1615 	else if (IS_VALLEYVIEW(dev_priv))
1616 		num_levels = 1;
1617 	else if (IS_G4X(dev_priv))
1618 		num_levels = 3;
1619 	else
1620 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1621 
1622 	drm_modeset_lock_all(dev);
1623 
1624 	for (level = 0; level < num_levels; level++) {
1625 		unsigned int latency = wm[level];
1626 
1627 		/*
1628 		 * - WM1+ latency values in 0.5us units
1629 		 * - latencies are in us on gen9/vlv/chv
1630 		 */
1631 		if (DISPLAY_VER(dev_priv) >= 9 ||
1632 		    IS_VALLEYVIEW(dev_priv) ||
1633 		    IS_CHERRYVIEW(dev_priv) ||
1634 		    IS_G4X(dev_priv))
1635 			latency *= 10;
1636 		else if (level > 0)
1637 			latency *= 5;
1638 
1639 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1640 			   level, wm[level], latency / 10, latency % 10);
1641 	}
1642 
1643 	drm_modeset_unlock_all(dev);
1644 }
1645 
1646 static int pri_wm_latency_show(struct seq_file *m, void *data)
1647 {
1648 	struct drm_i915_private *dev_priv = m->private;
1649 	const u16 *latencies;
1650 
1651 	if (DISPLAY_VER(dev_priv) >= 9)
1652 		latencies = dev_priv->wm.skl_latency;
1653 	else
1654 		latencies = dev_priv->wm.pri_latency;
1655 
1656 	wm_latency_show(m, latencies);
1657 
1658 	return 0;
1659 }
1660 
1661 static int spr_wm_latency_show(struct seq_file *m, void *data)
1662 {
1663 	struct drm_i915_private *dev_priv = m->private;
1664 	const u16 *latencies;
1665 
1666 	if (DISPLAY_VER(dev_priv) >= 9)
1667 		latencies = dev_priv->wm.skl_latency;
1668 	else
1669 		latencies = dev_priv->wm.spr_latency;
1670 
1671 	wm_latency_show(m, latencies);
1672 
1673 	return 0;
1674 }
1675 
1676 static int cur_wm_latency_show(struct seq_file *m, void *data)
1677 {
1678 	struct drm_i915_private *dev_priv = m->private;
1679 	const u16 *latencies;
1680 
1681 	if (DISPLAY_VER(dev_priv) >= 9)
1682 		latencies = dev_priv->wm.skl_latency;
1683 	else
1684 		latencies = dev_priv->wm.cur_latency;
1685 
1686 	wm_latency_show(m, latencies);
1687 
1688 	return 0;
1689 }
1690 
1691 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1692 {
1693 	struct drm_i915_private *dev_priv = inode->i_private;
1694 
1695 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
1696 		return -ENODEV;
1697 
1698 	return single_open(file, pri_wm_latency_show, dev_priv);
1699 }
1700 
1701 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1702 {
1703 	struct drm_i915_private *dev_priv = inode->i_private;
1704 
1705 	if (HAS_GMCH(dev_priv))
1706 		return -ENODEV;
1707 
1708 	return single_open(file, spr_wm_latency_show, dev_priv);
1709 }
1710 
1711 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1712 {
1713 	struct drm_i915_private *dev_priv = inode->i_private;
1714 
1715 	if (HAS_GMCH(dev_priv))
1716 		return -ENODEV;
1717 
1718 	return single_open(file, cur_wm_latency_show, dev_priv);
1719 }
1720 
1721 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1722 				size_t len, loff_t *offp, u16 wm[8])
1723 {
1724 	struct seq_file *m = file->private_data;
1725 	struct drm_i915_private *dev_priv = m->private;
1726 	struct drm_device *dev = &dev_priv->drm;
1727 	u16 new[8] = { 0 };
1728 	int num_levels;
1729 	int level;
1730 	int ret;
1731 	char tmp[32];
1732 
1733 	if (IS_CHERRYVIEW(dev_priv))
1734 		num_levels = 3;
1735 	else if (IS_VALLEYVIEW(dev_priv))
1736 		num_levels = 1;
1737 	else if (IS_G4X(dev_priv))
1738 		num_levels = 3;
1739 	else
1740 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1741 
1742 	if (len >= sizeof(tmp))
1743 		return -EINVAL;
1744 
1745 	if (copy_from_user(tmp, ubuf, len))
1746 		return -EFAULT;
1747 
1748 	tmp[len] = '\0';
1749 
1750 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1751 		     &new[0], &new[1], &new[2], &new[3],
1752 		     &new[4], &new[5], &new[6], &new[7]);
1753 	if (ret != num_levels)
1754 		return -EINVAL;
1755 
1756 	drm_modeset_lock_all(dev);
1757 
1758 	for (level = 0; level < num_levels; level++)
1759 		wm[level] = new[level];
1760 
1761 	drm_modeset_unlock_all(dev);
1762 
1763 	return len;
1764 }
1765 
1766 
static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (DISPLAY_VER(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

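/*
 * i915_hpd_storm_ctl: reading reports the current HPD storm threshold and
 * whether a storm has been detected (i.e. the re-enable work is still
 * pending); writing a decimal interrupt count sets a new threshold, writing 0
 * disables storm detection, and writing "reset" restores the default.  The
 * per-pin interrupt statistics are cleared on every write.  For example
 * (assuming debugfs is mounted at /sys/kernel/debug and this is DRM minor 0):
 *
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */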
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/*
	 * Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		drm_dbg_kms(&dev_priv->drm,
			    "Setting HPD storm detection threshold to %d\n",
			    new_threshold);
	else
		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};

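/*
 * i915_hpd_short_storm_ctl: reading reports whether short HPD storm detection
 * is enabled; writing a boolean toggles it, and writing "reset" restores the
 * platform default (enabled only when the platform has no DP-MST support).
 * As with i915_hpd_storm_ctl, the per-pin interrupt statistics are cleared on
 * every write.
 */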
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
		    new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};

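/*
 * i915_drrs_ctl: writing a non-zero value manually enables eDP DRRS, and
 * writing 0 disables it, for each attached eDP connector on every active CRTC
 * whose current state supports DRRS.  Any pending commit on a CRTC is waited
 * for first, and the whole walk is interruptible.  Only available on display
 * version 7 and newer.
 */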
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (DISPLAY_VER(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->hw.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			if (!(crtc_state->uapi.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(to_intel_connector(connector));
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			drm_dbg(&dev_priv->drm,
				"Manually %sabling DRRS. %llu\n",
				val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(encoder);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");

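/*
 * i915_fifo_underrun_reset: writing a truthy value waits for any pending
 * commit on each CRTC and then re-arms FIFO underrun reporting on every
 * active pipe, before also resetting the FBC underrun state via
 * intel_fbc_reset_underrun().  Writing a falsy value is a no-op.
 */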
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);
		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->hw.active) {
			drm_dbg_kms(&dev_priv->drm,
				    "Re-arming FIFO underruns on pipe %c\n",
				    pipe_name(crtc->pipe));

			intel_crtc_arm_fifo_underrun(crtc, crtc_state);
		}

		drm_modeset_unlock(&crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}

static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};

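/* Read-only status nodes, registered in bulk via drm_debugfs_create_files() */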
static const struct drm_info_list intel_display_debugfs_list[] = {
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_lpsp_status", i915_lpsp_status, 0},
};

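/*
 * Writable control files with dedicated file_operations, created one by one
 * with debugfs_create_file() in intel_display_debugfs_register() below.
 */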
static const struct {
	const char *name;
	const struct file_operations *fops;
} intel_display_debugfs_files[] = {
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};

void intel_display_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
		debugfs_create_file(intel_display_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    intel_display_debugfs_files[i].fops);
	}

	drm_debugfs_create_files(intel_display_debugfs_list,
				 ARRAY_SIZE(intel_display_debugfs_list),
				 minor->debugfs_root, minor);
}

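/*
 * Per-connector debugfs files, added for eDP/DP/HDMI connectors by
 * intel_connector_debugfs_add() below.  i915_panel_timings dumps the panel
 * power sequencer and backlight on/off delays for a connected eDP panel.
 */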
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->pps.panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->pps.panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->pps.backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->pps.backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

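/*
 * i915_hdcp_sink_capability: prints the HDCP version information reported by
 * intel_hdcp_info() for the sink.  Requires the connector to be connected and
 * to currently have an encoder attached; connection_mutex is held while
 * reading.
 */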
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int ret;

	ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
	if (ret)
		return ret;

	if (!connector->encoder || connector->status != connector_status_connected) {
		ret = -ENODEV;
		goto out;
	}

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

out:
	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);

static int i915_psr_status_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		intel_attached_dp(to_intel_connector(connector));

	return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);

#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
				seq_puts(m, "LPSP: incapable\n"))

static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_encoder *encoder;

	encoder = intel_attached_encoder(to_intel_connector(connector));
	if (!encoder)
		return -ENODEV;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	if (DISPLAY_VER(i915) >= 13) {
		LPSP_CAPABLE(encoder->port <= PORT_B);
		return 0;
	}

	switch (DISPLAY_VER(i915)) {
	case 12:
		/*
		 * Actually TGL can drive LPSP on ports up to DDI_C, but
		 * there is no physically connected DDI_C on any TGL SKU,
		 * and the driver doesn't even initialize the DDI_C port
		 * on gen12.
		 */
		LPSP_CAPABLE(encoder->port <= PORT_B);
		break;
	case 11:
		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
		break;
	case 10:
	case 9:
		LPSP_CAPABLE(encoder->port == PORT_A &&
			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
		break;
	default:
		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);

static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = intel_attached_dp(to_intel_connector(connector));
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

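/*
 * Writing a boolean to i915_dsc_fec_support stores it in
 * intel_dp->force_dsc_en; the _show handler above reports whether DSC is
 * currently enabled on the CRTC along with the sink's DSC (and, for non-eDP,
 * FEC) capabilities.
 */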
static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	if (len == 0)
		return 0;

	drm_dbg(&i915->drm,
		"Copied %zu bytes from user to force DSC\n", len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
		(dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};

static int i915_dsc_bpp_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_crtc_state *crtc_state;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	int ret;

	if (!encoder)
		return -ENODEV;

	ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
	if (ret)
		return ret;

	crtc = connector->state->crtc;
	if (connector->status != connector_status_connected || !crtc) {
		ret = -ENODEV;
		goto out;
	}

	crtc_state = to_intel_crtc_state(crtc->state);
	seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp);

out:
	drm_modeset_unlock(&dev->mode_config.connection_mutex);

	return ret;
}

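/*
 * Writing an integer to i915_dsc_bpp stores it in intel_dp->force_dsc_bpp;
 * the _show handler above reports the compressed bpp of the connector's
 * current CRTC state.
 */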
static ssize_t i915_dsc_bpp_write(struct file *file,
				  const char __user *ubuf,
				  size_t len, loff_t *offp)
{
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int dsc_bpp = 0;
	int ret;

	ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp);
	if (ret < 0)
		return ret;

	intel_dp->force_dsc_bpp = dsc_bpp;
	*offp += len;

	return len;
}

static int i915_dsc_bpp_open(struct inode *inode,
			     struct file *file)
{
	return single_open(file, i915_dsc_bpp_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_bpp_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_bpp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_bpp_write
};

/**
 * intel_connector_debugfs_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int intel_connector_debugfs_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (HAS_PSR(dev_priv) &&
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_psr_status", 0444, root,
				    connector, &i915_psr_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (DISPLAY_VER(dev_priv) >= 11 &&
	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
	    !to_intel_connector(connector)->mst_port) ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
		debugfs_create_file("i915_dsc_fec_support", 0644, root,
				    connector, &i915_dsc_fec_support_fops);

		debugfs_create_file("i915_dsc_bpp", 0644, root,
				    connector, &i915_dsc_bpp_fops);
	}

	/* Legacy panels don't support LPSP on any platform */
	if ((DISPLAY_VER(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
	     IS_BROADWELL(dev_priv)) &&
	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
		debugfs_create_file("i915_lpsp_capability", 0444, root,
				    connector, &i915_lpsp_capability_fops);

	return 0;
}

/**
 * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
 * @crtc: pointer to a drm_crtc
 *
 * Returns 0 on success, negative error codes on error.
 *
 * Failure to add debugfs entries should generally be ignored.
 */
int intel_crtc_debugfs_add(struct drm_crtc *crtc)
{
	if (!crtc->debugfs_entry)
		return -ENODEV;

	crtc_updates_add(crtc);
	return 0;
}