1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_types.h"
13 #include "intel_dp.h"
14 #include "intel_fbc.h"
15 #include "intel_hdcp.h"
16 #include "intel_hdmi.h"
17 #include "intel_pm.h"
18 #include "intel_psr.h"
19 #include "intel_sideband.h"
20 
21 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
22 {
23 	return to_i915(node->minor->dev);
24 }
25 
26 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
27 {
28 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
29 
30 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
31 		   dev_priv->fb_tracking.busy_bits);
32 
33 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
34 		   dev_priv->fb_tracking.flip_bits);
35 
36 	return 0;
37 }
38 
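/*
 * Report whether FBC is currently active (and the reason when it is not),
 * plus whether the hardware is compressing right now.
 */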
39 static int i915_fbc_status(struct seq_file *m, void *unused)
40 {
41 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
42 	struct intel_fbc *fbc = &dev_priv->fbc;
43 	intel_wakeref_t wakeref;
44 
45 	if (!HAS_FBC(dev_priv))
46 		return -ENODEV;
47 
48 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
49 	mutex_lock(&fbc->lock);
50 
51 	if (intel_fbc_is_active(dev_priv))
52 		seq_puts(m, "FBC enabled\n");
53 	else
54 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
55 
56 	if (intel_fbc_is_active(dev_priv)) {
57 		u32 mask;
58 
59 		if (INTEL_GEN(dev_priv) >= 8)
60 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
61 		else if (INTEL_GEN(dev_priv) >= 7)
62 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
63 		else if (INTEL_GEN(dev_priv) >= 5)
64 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
65 		else if (IS_G4X(dev_priv))
66 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
67 		else
68 			mask = intel_de_read(dev_priv, FBC_STATUS) &
69 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
70 
71 		seq_printf(m, "Compressing: %s\n", yesno(mask));
72 	}
73 
74 	mutex_unlock(&fbc->lock);
75 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
76 
77 	return 0;
78 }
79 
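/*
 * The FBC false color knob (gen7+) makes the hardware draw compressed
 * framebuffer lines in a solid color, so FBC compression can be verified
 * visually.  Typically exercised along these lines (the debugfs path is an
 * example and may differ per system):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 *   cat /sys/kernel/debug/dri/0/i915_fbc_false_color
 */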
80 static int i915_fbc_false_color_get(void *data, u64 *val)
81 {
82 	struct drm_i915_private *dev_priv = data;
83 
84 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
85 		return -ENODEV;
86 
87 	*val = dev_priv->fbc.false_color;
88 
89 	return 0;
90 }
91 
92 static int i915_fbc_false_color_set(void *data, u64 val)
93 {
94 	struct drm_i915_private *dev_priv = data;
95 	u32 reg;
96 
97 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
98 		return -ENODEV;
99 
100 	mutex_lock(&dev_priv->fbc.lock);
101 
102 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
103 	dev_priv->fbc.false_color = val;
104 
105 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
106 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
107 
108 	mutex_unlock(&dev_priv->fbc.lock);
109 	return 0;
110 }
111 
112 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
113 			i915_fbc_false_color_get, i915_fbc_false_color_set,
114 			"%llu\n");
115 
116 static int i915_ips_status(struct seq_file *m, void *unused)
117 {
118 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
119 	intel_wakeref_t wakeref;
120 
121 	if (!HAS_IPS(dev_priv))
122 		return -ENODEV;
123 
124 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
125 
126 	seq_printf(m, "Enabled by kernel parameter: %s\n",
127 		   yesno(i915_modparams.enable_ips));
128 
129 	if (INTEL_GEN(dev_priv) >= 8) {
130 		seq_puts(m, "Currently: unknown\n");
131 	} else {
132 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
133 			seq_puts(m, "Currently: enabled\n");
134 		else
135 			seq_puts(m, "Currently: disabled\n");
136 	}
137 
138 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
139 
140 	return 0;
141 }
142 
143 static int i915_sr_status(struct seq_file *m, void *unused)
144 {
145 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
146 	intel_wakeref_t wakeref;
147 	bool sr_enabled = false;
148 
149 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
150 
151 	if (INTEL_GEN(dev_priv) >= 9)
152 		/* no global SR status; inspect per-plane WM */;
153 	else if (HAS_PCH_SPLIT(dev_priv))
154 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
155 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
156 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
157 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
158 	else if (IS_I915GM(dev_priv))
159 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
160 	else if (IS_PINEVIEW(dev_priv))
161 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
162 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
163 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
164 
165 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
166 
167 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
168 
169 	return 0;
170 }
171 
172 static int i915_opregion(struct seq_file *m, void *unused)
173 {
174 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
175 
176 	if (opregion->header)
177 		seq_write(m, opregion->header, OPREGION_SIZE);
178 
179 	return 0;
180 }
181 
182 static int i915_vbt(struct seq_file *m, void *unused)
183 {
184 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
185 
186 	if (opregion->vbt)
187 		seq_write(m, opregion->vbt, opregion->vbt_size);
188 
189 	return 0;
190 }
191 
192 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
193 {
194 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
195 	struct drm_device *dev = &dev_priv->drm;
196 	struct intel_framebuffer *fbdev_fb = NULL;
197 	struct drm_framebuffer *drm_fb;
198 
199 #ifdef CONFIG_DRM_FBDEV_EMULATION
200 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
201 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
202 
203 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
204 			   fbdev_fb->base.width,
205 			   fbdev_fb->base.height,
206 			   fbdev_fb->base.format->depth,
207 			   fbdev_fb->base.format->cpp[0] * 8,
208 			   fbdev_fb->base.modifier,
209 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
210 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
211 		seq_putc(m, '\n');
212 	}
213 #endif
214 
215 	mutex_lock(&dev->mode_config.fb_lock);
216 	drm_for_each_fb(drm_fb, dev) {
217 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
218 		if (fb == fbdev_fb)
219 			continue;
220 
221 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
222 			   fb->base.width,
223 			   fb->base.height,
224 			   fb->base.format->depth,
225 			   fb->base.format->cpp[0] * 8,
226 			   fb->base.modifier,
227 			   drm_framebuffer_read_refcount(&fb->base));
228 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
229 		seq_putc(m, '\n');
230 	}
231 	mutex_unlock(&dev->mode_config.fb_lock);
232 
233 	return 0;
234 }
235 
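/*
 * Report the sink-side PSR state by reading the DP_PSR_STATUS DPCD
 * register over AUX and decoding its state field.
 */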
236 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
237 {
238 	u8 val;
239 	static const char * const sink_status[] = {
240 		"inactive",
241 		"transition to active, capture and display",
242 		"active, display from RFB",
243 		"active, capture and display on sink device timings",
244 		"transition to inactive, capture and display, timing re-sync",
245 		"reserved",
246 		"reserved",
247 		"sink internal error",
248 	};
249 	struct drm_connector *connector = m->private;
250 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
251 	struct intel_dp *intel_dp =
252 		intel_attached_dp(to_intel_connector(connector));
253 	int ret;
254 
255 	if (!CAN_PSR(dev_priv)) {
256 		seq_puts(m, "PSR Unsupported\n");
257 		return -ENODEV;
258 	}
259 
260 	if (connector->status != connector_status_connected)
261 		return -ENODEV;
262 
263 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
264 
265 	if (ret == 1) {
266 		const char *str = "unknown";
267 
268 		val &= DP_PSR_SINK_STATE_MASK;
269 		if (val < ARRAY_SIZE(sink_status))
270 			str = sink_status[val];
271 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
272 	} else {
273 		return ret < 0 ? ret : -EIO;
274 	}
275 
276 	return 0;
277 }
278 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
279 
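/*
 * Decode the source-side PSR/PSR2 hardware state from the live status
 * field of EDP_PSR_STATUS/EDP_PSR2_STATUS on the PSR transcoder.
 */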
280 static void
281 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
282 {
283 	u32 val, status_val;
284 	const char *status = "unknown";
285 
286 	if (dev_priv->psr.psr2_enabled) {
287 		static const char * const live_status[] = {
288 			"IDLE",
289 			"CAPTURE",
290 			"CAPTURE_FS",
291 			"SLEEP",
292 			"BUFON_FW",
293 			"ML_UP",
294 			"SU_STANDBY",
295 			"FAST_SLEEP",
296 			"DEEP_SLEEP",
297 			"BUF_ON",
298 			"TG_ON"
299 		};
300 		val = intel_de_read(dev_priv,
301 				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
302 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
303 			      EDP_PSR2_STATUS_STATE_SHIFT;
304 		if (status_val < ARRAY_SIZE(live_status))
305 			status = live_status[status_val];
306 	} else {
307 		static const char * const live_status[] = {
308 			"IDLE",
309 			"SRDONACK",
310 			"SRDENT",
311 			"BUFOFF",
312 			"BUFON",
313 			"AUXACK",
314 			"SRDOFFACK",
315 			"SRDENT_ON",
316 		};
317 		val = intel_de_read(dev_priv,
318 				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
319 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
320 			      EDP_PSR_STATUS_STATE_SHIFT;
321 		if (status_val < ARRAY_SIZE(live_status))
322 			status = live_status[status_val];
323 	}
324 
325 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
326 }
327 
328 static int i915_edp_psr_status(struct seq_file *m, void *data)
329 {
330 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
331 	struct i915_psr *psr = &dev_priv->psr;
332 	intel_wakeref_t wakeref;
333 	const char *status;
334 	bool enabled;
335 	u32 val;
336 
337 	if (!HAS_PSR(dev_priv))
338 		return -ENODEV;
339 
340 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
341 	if (psr->dp)
342 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
343 	seq_puts(m, "\n");
344 
345 	if (!psr->sink_support)
346 		return 0;
347 
348 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
349 	mutex_lock(&psr->lock);
350 
351 	if (psr->enabled)
352 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
353 	else
354 		status = "disabled";
355 	seq_printf(m, "PSR mode: %s\n", status);
356 
357 	if (!psr->enabled) {
358 		seq_printf(m, "PSR sink not reliable: %s\n",
359 			   yesno(psr->sink_not_reliable));
360 
361 		goto unlock;
362 	}
363 
364 	if (psr->psr2_enabled) {
365 		val = intel_de_read(dev_priv,
366 				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
367 		enabled = val & EDP_PSR2_ENABLE;
368 	} else {
369 		val = intel_de_read(dev_priv,
370 				    EDP_PSR_CTL(dev_priv->psr.transcoder));
371 		enabled = val & EDP_PSR_ENABLE;
372 	}
373 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
374 		   enableddisabled(enabled), val);
375 	psr_source_status(dev_priv, m);
376 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
377 		   psr->busy_frontbuffer_bits);
378 
379 	/*
380 	 * SKL+ Perf counter is reset to 0 every time DC state is entered
381 	 */
382 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
383 		val = intel_de_read(dev_priv,
384 				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
385 		val &= EDP_PSR_PERF_CNT_MASK;
386 		seq_printf(m, "Performance counter: %u\n", val);
387 	}
388 
389 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
390 		seq_printf(m, "Last attempted entry at: %lld\n",
391 			   psr->last_entry_attempt);
392 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
393 	}
394 
395 	if (psr->psr2_enabled) {
396 		u32 su_frames_val[3];
397 		int frame;
398 
399 		/*
400 		 * Read all 3 registers beforehand to minimize crossing a
401 		 * frame boundary between register reads
402 		 */
403 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
404 			val = intel_de_read(dev_priv,
405 					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
406 			su_frames_val[frame / 3] = val;
407 		}
408 
409 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
410 
411 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
412 			u32 su_blocks;
413 
414 			su_blocks = su_frames_val[frame / 3] &
415 				    PSR2_SU_STATUS_MASK(frame);
416 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
417 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
418 		}
419 	}
420 
421 unlock:
422 	mutex_unlock(&psr->lock);
423 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
424 
425 	return 0;
426 }
427 
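/*
 * i915_edp_psr_debug lets the PSR mode be overridden for debugging; the
 * value is a mask handed to intel_psr_debug_set() (see the I915_PSR_DEBUG_*
 * defines).  For example (path and value are examples only):
 *
 *   echo 0x1 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 */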
428 static int
429 i915_edp_psr_debug_set(void *data, u64 val)
430 {
431 	struct drm_i915_private *dev_priv = data;
432 	intel_wakeref_t wakeref;
433 	int ret;
434 
435 	if (!CAN_PSR(dev_priv))
436 		return -ENODEV;
437 
438 	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
439 
440 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
441 
442 	ret = intel_psr_debug_set(dev_priv, val);
443 
444 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
445 
446 	return ret;
447 }
448 
449 static int
450 i915_edp_psr_debug_get(void *data, u64 *val)
451 {
452 	struct drm_i915_private *dev_priv = data;
453 
454 	if (!CAN_PSR(dev_priv))
455 		return -ENODEV;
456 
457 	*val = READ_ONCE(dev_priv->psr.debug);
458 	return 0;
459 }
460 
461 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
462 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
463 			"%llu\n");
464 
465 static int i915_power_domain_info(struct seq_file *m, void *unused)
466 {
467 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
468 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
469 	int i;
470 
471 	mutex_lock(&power_domains->lock);
472 
473 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
474 	for (i = 0; i < power_domains->power_well_count; i++) {
475 		struct i915_power_well *power_well;
476 		enum intel_display_power_domain power_domain;
477 
478 		power_well = &power_domains->power_wells[i];
479 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
480 			   power_well->count);
481 
482 		for_each_power_domain(power_domain, power_well->desc->domains)
483 			seq_printf(m, "  %-23s %d\n",
484 				 intel_display_power_domain_str(power_domain),
485 				 power_domains->domain_use_count[power_domain]);
486 	}
487 
488 	mutex_unlock(&power_domains->lock);
489 
490 	return 0;
491 }
492 
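/*
 * Dump DMC (CSR) firmware state: whether the firmware is loaded, its
 * version, and the DC-state transition counters it maintains.
 */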
493 static int i915_dmc_info(struct seq_file *m, void *unused)
494 {
495 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
496 	intel_wakeref_t wakeref;
497 	struct intel_csr *csr;
498 	i915_reg_t dc5_reg, dc6_reg = {};
499 
500 	if (!HAS_CSR(dev_priv))
501 		return -ENODEV;
502 
503 	csr = &dev_priv->csr;
504 
505 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
506 
507 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
508 	seq_printf(m, "path: %s\n", csr->fw_path);
509 
510 	if (!csr->dmc_payload)
511 		goto out;
512 
513 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
514 		   CSR_VERSION_MINOR(csr->version));
515 
516 	if (INTEL_GEN(dev_priv) >= 12) {
517 		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
518 		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
519 		/*
520 		 * NOTE: DMC_DEBUG3 is a general purpose register.
521 		 * According to Bspec 49196 the DMC firmware reuses the DC5/6
522 		 * counter register for DC3CO debugging and validation, but the
523 		 * TGL DMC firmware uses DMC_DEBUG3 as the DC3CO counter.
524 		 */
525 		seq_printf(m, "DC3CO count: %d\n",
526 			   intel_de_read(dev_priv, DMC_DEBUG3));
527 	} else {
528 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
529 						 SKL_CSR_DC3_DC5_COUNT;
530 		if (!IS_GEN9_LP(dev_priv))
531 			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
532 	}
533 
534 	seq_printf(m, "DC3 -> DC5 count: %d\n",
535 		   intel_de_read(dev_priv, dc5_reg));
536 	if (dc6_reg.reg)
537 		seq_printf(m, "DC5 -> DC6 count: %d\n",
538 			   intel_de_read(dev_priv, dc6_reg));
539 
540 out:
541 	seq_printf(m, "program base: 0x%08x\n",
542 		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
543 	seq_printf(m, "ssp base: 0x%08x\n",
544 		   intel_de_read(dev_priv, CSR_SSP_BASE));
545 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
546 
547 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
548 
549 	return 0;
550 }
551 
552 static void intel_seq_print_mode(struct seq_file *m, int tabs,
553 				 const struct drm_display_mode *mode)
554 {
555 	int i;
556 
557 	for (i = 0; i < tabs; i++)
558 		seq_putc(m, '\t');
559 
560 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
561 }
562 
563 static void intel_encoder_info(struct seq_file *m,
564 			       struct intel_crtc *crtc,
565 			       struct intel_encoder *encoder)
566 {
567 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
568 	struct drm_connector_list_iter conn_iter;
569 	struct drm_connector *connector;
570 
571 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
572 		   encoder->base.base.id, encoder->base.name);
573 
574 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
575 	drm_for_each_connector_iter(connector, &conn_iter) {
576 		const struct drm_connector_state *conn_state =
577 			connector->state;
578 
579 		if (conn_state->best_encoder != &encoder->base)
580 			continue;
581 
582 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
583 			   connector->base.id, connector->name);
584 	}
585 	drm_connector_list_iter_end(&conn_iter);
586 }
587 
588 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
589 {
590 	const struct drm_display_mode *mode = panel->fixed_mode;
591 
592 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
593 }
594 
595 static void intel_hdcp_info(struct seq_file *m,
596 			    struct intel_connector *intel_connector)
597 {
598 	bool hdcp_cap, hdcp2_cap;
599 
600 	hdcp_cap = intel_hdcp_capable(intel_connector);
601 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
602 
603 	if (hdcp_cap)
604 		seq_puts(m, "HDCP1.4 ");
605 	if (hdcp2_cap)
606 		seq_puts(m, "HDCP2.2 ");
607 
608 	if (!hdcp_cap && !hdcp2_cap)
609 		seq_puts(m, "None");
610 
611 	seq_puts(m, "\n");
612 }
613 
614 static void intel_dp_info(struct seq_file *m,
615 			  struct intel_connector *intel_connector)
616 {
617 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
618 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
619 
620 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
621 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
622 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
623 		intel_panel_info(m, &intel_connector->panel);
624 
625 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
626 				&intel_dp->aux);
627 	if (intel_connector->hdcp.shim) {
628 		seq_puts(m, "\tHDCP version: ");
629 		intel_hdcp_info(m, intel_connector);
630 	}
631 }
632 
633 static void intel_dp_mst_info(struct seq_file *m,
634 			      struct intel_connector *intel_connector)
635 {
636 	bool has_audio = intel_connector->port->has_audio;
637 
638 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
639 }
640 
641 static void intel_hdmi_info(struct seq_file *m,
642 			    struct intel_connector *intel_connector)
643 {
644 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
645 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
646 
647 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
648 	if (intel_connector->hdcp.shim) {
649 		seq_puts(m, "\tHDCP version: ");
650 		intel_hdcp_info(m, intel_connector);
651 	}
652 }
653 
654 static void intel_lvds_info(struct seq_file *m,
655 			    struct intel_connector *intel_connector)
656 {
657 	intel_panel_info(m, &intel_connector->panel);
658 }
659 
660 static void intel_connector_info(struct seq_file *m,
661 				 struct drm_connector *connector)
662 {
663 	struct intel_connector *intel_connector = to_intel_connector(connector);
664 	const struct drm_connector_state *conn_state = connector->state;
665 	struct intel_encoder *encoder =
666 		to_intel_encoder(conn_state->best_encoder);
667 	const struct drm_display_mode *mode;
668 
669 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
670 		   connector->base.id, connector->name,
671 		   drm_get_connector_status_name(connector->status));
672 
673 	if (connector->status == connector_status_disconnected)
674 		return;
675 
676 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
677 		   connector->display_info.width_mm,
678 		   connector->display_info.height_mm);
679 	seq_printf(m, "\tsubpixel order: %s\n",
680 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
681 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
682 
683 	if (!encoder)
684 		return;
685 
686 	switch (connector->connector_type) {
687 	case DRM_MODE_CONNECTOR_DisplayPort:
688 	case DRM_MODE_CONNECTOR_eDP:
689 		if (encoder->type == INTEL_OUTPUT_DP_MST)
690 			intel_dp_mst_info(m, intel_connector);
691 		else
692 			intel_dp_info(m, intel_connector);
693 		break;
694 	case DRM_MODE_CONNECTOR_LVDS:
695 		if (encoder->type == INTEL_OUTPUT_LVDS)
696 			intel_lvds_info(m, intel_connector);
697 		break;
698 	case DRM_MODE_CONNECTOR_HDMIA:
699 		if (encoder->type == INTEL_OUTPUT_HDMI ||
700 		    encoder->type == INTEL_OUTPUT_DDI)
701 			intel_hdmi_info(m, intel_connector);
702 		break;
703 	default:
704 		break;
705 	}
706 
707 	seq_puts(m, "\tmodes:\n");
708 	list_for_each_entry(mode, &connector->modes, head)
709 		intel_seq_print_mode(m, 2, mode);
710 }
711 
712 static const char *plane_type(enum drm_plane_type type)
713 {
714 	switch (type) {
715 	case DRM_PLANE_TYPE_OVERLAY:
716 		return "OVL";
717 	case DRM_PLANE_TYPE_PRIMARY:
718 		return "PRI";
719 	case DRM_PLANE_TYPE_CURSOR:
720 		return "CUR";
721 	/*
722 	 * Deliberately omitting default: to generate compiler warnings
723 	 * when a new drm_plane_type gets added.
724 	 */
725 	}
726 
727 	return "unknown";
728 }
729 
730 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
731 {
732 	/*
733 	 * According to the documentation only one DRM_MODE_ROTATE_ value is
734 	 * allowed, but print them all so that any misuse of the bits is visible.
735 	 */
736 	snprintf(buf, bufsize,
737 		 "%s%s%s%s%s%s(0x%08x)",
738 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
739 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
740 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
741 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
742 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
743 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
744 		 rotation);
745 }
746 
747 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
748 {
749 	const struct intel_plane_state *plane_state =
750 		to_intel_plane_state(plane->base.state);
751 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
752 	struct drm_format_name_buf format_name;
753 	struct drm_rect src, dst;
754 	char rot_str[48];
755 
756 	src = drm_plane_state_src(&plane_state->uapi);
757 	dst = drm_plane_state_dest(&plane_state->uapi);
758 
759 	if (fb)
760 		drm_get_format_name(fb->format->format, &format_name);
761 
762 	plane_rotation(rot_str, sizeof(rot_str),
763 		       plane_state->uapi.rotation);
764 
765 	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
766 		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
767 		   fb ? fb->width : 0, fb ? fb->height : 0,
768 		   DRM_RECT_FP_ARG(&src),
769 		   DRM_RECT_ARG(&dst),
770 		   rot_str);
771 }
772 
773 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
774 {
775 	const struct intel_plane_state *plane_state =
776 		to_intel_plane_state(plane->base.state);
777 	const struct drm_framebuffer *fb = plane_state->hw.fb;
778 	struct drm_format_name_buf format_name;
779 	char rot_str[48];
780 
781 	if (!fb)
782 		return;
783 
784 	drm_get_format_name(fb->format->format, &format_name);
785 
786 	plane_rotation(rot_str, sizeof(rot_str),
787 		       plane_state->hw.rotation);
788 
789 	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
790 		   fb->base.id, format_name.str,
791 		   fb->width, fb->height,
792 		   yesno(plane_state->uapi.visible),
793 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
794 		   DRM_RECT_ARG(&plane_state->uapi.dst),
795 		   rot_str);
796 }
797 
798 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
799 {
800 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
801 	struct intel_plane *plane;
802 
803 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
804 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
805 			   plane->base.base.id, plane->base.name,
806 			   plane_type(plane->base.type));
807 		intel_plane_uapi_info(m, plane);
808 		intel_plane_hw_info(m, plane);
809 	}
810 }
811 
812 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
813 {
814 	const struct intel_crtc_state *crtc_state =
815 		to_intel_crtc_state(crtc->base.state);
816 	int num_scalers = crtc->num_scalers;
817 	int i;
818 
819 	/* Not all platforms have a scaler */
820 	if (num_scalers) {
821 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
822 			   num_scalers,
823 			   crtc_state->scaler_state.scaler_users,
824 			   crtc_state->scaler_state.scaler_id);
825 
826 		for (i = 0; i < num_scalers; i++) {
827 			const struct intel_scaler *sc =
828 				&crtc_state->scaler_state.scalers[i];
829 
830 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
831 				   i, yesno(sc->in_use), sc->mode);
832 		}
833 		seq_puts(m, "\n");
834 	} else {
835 		seq_puts(m, "\tNo scalers available on this platform\n");
836 	}
837 }
838 
839 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
840 {
841 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
842 	const struct intel_crtc_state *crtc_state =
843 		to_intel_crtc_state(crtc->base.state);
844 	struct intel_encoder *encoder;
845 
846 	seq_printf(m, "[CRTC:%d:%s]:\n",
847 		   crtc->base.base.id, crtc->base.name);
848 
849 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
850 		   yesno(crtc_state->uapi.enable),
851 		   yesno(crtc_state->uapi.active),
852 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
853 
854 	if (crtc_state->hw.enable) {
855 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
856 			   yesno(crtc_state->hw.active),
857 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
858 
859 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
860 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
861 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
862 
863 		intel_scaler_info(m, crtc);
864 	}
865 
866 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
867 				    crtc_state->uapi.encoder_mask)
868 		intel_encoder_info(m, crtc, encoder);
869 
870 	intel_plane_info(m, crtc);
871 
872 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
873 		   yesno(!crtc->cpu_fifo_underrun_disabled),
874 		   yesno(!crtc->pch_fifo_underrun_disabled));
875 }
876 
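/*
 * Top-level display state dump: per-CRTC uapi/hw state, scalers, planes
 * and encoders, followed by every connector known to the device.
 */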
877 static int i915_display_info(struct seq_file *m, void *unused)
878 {
879 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
880 	struct drm_device *dev = &dev_priv->drm;
881 	struct intel_crtc *crtc;
882 	struct drm_connector *connector;
883 	struct drm_connector_list_iter conn_iter;
884 	intel_wakeref_t wakeref;
885 
886 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
887 
888 	drm_modeset_lock_all(dev);
889 
890 	seq_puts(m, "CRTC info\n");
891 	seq_puts(m, "---------\n");
892 	for_each_intel_crtc(dev, crtc)
893 		intel_crtc_info(m, crtc);
894 
895 	seq_puts(m, "\n");
896 	seq_puts(m, "Connector info\n");
897 	seq_puts(m, "--------------\n");
898 	drm_connector_list_iter_begin(dev, &conn_iter);
899 	drm_for_each_connector_iter(connector, &conn_iter)
900 		intel_connector_info(m, connector);
901 	drm_connector_list_iter_end(&conn_iter);
902 
903 	drm_modeset_unlock_all(dev);
904 
905 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
906 
907 	return 0;
908 }
909 
910 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
911 {
912 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
913 	struct drm_device *dev = &dev_priv->drm;
914 	int i;
915 
916 	drm_modeset_lock_all(dev);
917 
918 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
919 		   dev_priv->dpll.ref_clks.nssc,
920 		   dev_priv->dpll.ref_clks.ssc);
921 
922 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
923 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
924 
925 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
926 			   pll->info->id);
927 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
928 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
929 		seq_printf(m, " tracked hardware state:\n");
930 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
931 		seq_printf(m, " dpll_md: 0x%08x\n",
932 			   pll->state.hw_state.dpll_md);
933 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
934 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
935 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
936 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
937 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
938 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
939 			   pll->state.hw_state.mg_refclkin_ctl);
940 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
941 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
942 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
943 			   pll->state.hw_state.mg_clktop2_hsclkctl);
944 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
945 			   pll->state.hw_state.mg_pll_div0);
946 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
947 			   pll->state.hw_state.mg_pll_div1);
948 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
949 			   pll->state.hw_state.mg_pll_lf);
950 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
951 			   pll->state.hw_state.mg_pll_frac_lock);
952 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
953 			   pll->state.hw_state.mg_pll_ssc);
954 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
955 			   pll->state.hw_state.mg_pll_bias);
956 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
957 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
958 	}
959 	drm_modeset_unlock_all(dev);
960 
961 	return 0;
962 }
963 
964 static int i915_ipc_status_show(struct seq_file *m, void *data)
965 {
966 	struct drm_i915_private *dev_priv = m->private;
967 
968 	seq_printf(m, "Isochronous Priority Control: %s\n",
969 			yesno(dev_priv->ipc_enabled));
970 	return 0;
971 }
972 
973 static int i915_ipc_status_open(struct inode *inode, struct file *file)
974 {
975 	struct drm_i915_private *dev_priv = inode->i_private;
976 
977 	if (!HAS_IPC(dev_priv))
978 		return -ENODEV;
979 
980 	return single_open(file, i915_ipc_status_show, dev_priv);
981 }
982 
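/*
 * Writing to i915_ipc_status enables or disables Isochronous Priority
 * Control.  Any boolean string understood by kstrtobool() works, e.g.
 * (the debugfs path is an example):
 *
 *   echo 0 > /sys/kernel/debug/dri/0/i915_ipc_status
 */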
983 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
984 				     size_t len, loff_t *offp)
985 {
986 	struct seq_file *m = file->private_data;
987 	struct drm_i915_private *dev_priv = m->private;
988 	intel_wakeref_t wakeref;
989 	bool enable;
990 	int ret;
991 
992 	ret = kstrtobool_from_user(ubuf, len, &enable);
993 	if (ret < 0)
994 		return ret;
995 
996 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
997 		if (!dev_priv->ipc_enabled && enable)
998 			drm_info(&dev_priv->drm,
999 				 "Enabling IPC: WM will be proper only after next commit\n");
1000 		dev_priv->wm.distrust_bios_wm = true;
1001 		dev_priv->ipc_enabled = enable;
1002 		intel_enable_ipc(dev_priv);
1003 	}
1004 
1005 	return len;
1006 }
1007 
1008 static const struct file_operations i915_ipc_status_fops = {
1009 	.owner = THIS_MODULE,
1010 	.open = i915_ipc_status_open,
1011 	.read = seq_read,
1012 	.llseek = seq_lseek,
1013 	.release = single_release,
1014 	.write = i915_ipc_status_write
1015 };
1016 
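/*
 * Print the display data buffer (DDB) allocation per pipe and plane on
 * gen9+ platforms, including the cursor entry.
 */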
1017 static int i915_ddb_info(struct seq_file *m, void *unused)
1018 {
1019 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1020 	struct drm_device *dev = &dev_priv->drm;
1021 	struct skl_ddb_entry *entry;
1022 	struct intel_crtc *crtc;
1023 
1024 	if (INTEL_GEN(dev_priv) < 9)
1025 		return -ENODEV;
1026 
1027 	drm_modeset_lock_all(dev);
1028 
1029 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1030 
1031 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1032 		struct intel_crtc_state *crtc_state =
1033 			to_intel_crtc_state(crtc->base.state);
1034 		enum pipe pipe = crtc->pipe;
1035 		enum plane_id plane_id;
1036 
1037 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1038 
1039 		for_each_plane_id_on_crtc(crtc, plane_id) {
1040 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1041 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1042 				   entry->start, entry->end,
1043 				   skl_ddb_entry_size(entry));
1044 		}
1045 
1046 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1047 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1048 			   entry->end, skl_ddb_entry_size(entry));
1049 	}
1050 
1051 	drm_modeset_unlock_all(dev);
1052 
1053 	return 0;
1054 }
1055 
1056 static void drrs_status_per_crtc(struct seq_file *m,
1057 				 struct drm_device *dev,
1058 				 struct intel_crtc *intel_crtc)
1059 {
1060 	struct drm_i915_private *dev_priv = to_i915(dev);
1061 	struct i915_drrs *drrs = &dev_priv->drrs;
1062 	int vrefresh = 0;
1063 	struct drm_connector *connector;
1064 	struct drm_connector_list_iter conn_iter;
1065 
1066 	drm_connector_list_iter_begin(dev, &conn_iter);
1067 	drm_for_each_connector_iter(connector, &conn_iter) {
1068 		if (connector->state->crtc != &intel_crtc->base)
1069 			continue;
1070 
1071 		seq_printf(m, "%s:\n", connector->name);
1072 	}
1073 	drm_connector_list_iter_end(&conn_iter);
1074 
1075 	seq_puts(m, "\n");
1076 
1077 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1078 		struct intel_panel *panel;
1079 
1080 		mutex_lock(&drrs->mutex);
1081 		/* DRRS Supported */
1082 		seq_puts(m, "\tDRRS Supported: Yes\n");
1083 
1084 		/* intel_edp_drrs_disable() will make drrs->dp NULL */
1085 		if (!drrs->dp) {
1086 			seq_puts(m, "Idleness DRRS: Disabled\n");
1087 			if (dev_priv->psr.enabled)
1088 				seq_puts(m,
1089 				"\tAs PSR is enabled, DRRS is not enabled\n");
1090 			mutex_unlock(&drrs->mutex);
1091 			return;
1092 		}
1093 
1094 		panel = &drrs->dp->attached_connector->panel;
1095 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1096 					drrs->busy_frontbuffer_bits);
1097 
1098 		seq_puts(m, "\n\t\t");
1099 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1100 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1101 			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1102 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1103 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1104 			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1105 		} else {
1106 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1107 						drrs->refresh_rate_type);
1108 			mutex_unlock(&drrs->mutex);
1109 			return;
1110 		}
1111 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1112 
1113 		seq_puts(m, "\n\t\t");
1114 		mutex_unlock(&drrs->mutex);
1115 	} else {
1116 		/* DRRS not supported. Print the VBT parameter */
1117 		seq_puts(m, "\tDRRS Supported : No");
1118 	}
1119 	seq_puts(m, "\n");
1120 }
1121 
1122 static int i915_drrs_status(struct seq_file *m, void *unused)
1123 {
1124 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1125 	struct drm_device *dev = &dev_priv->drm;
1126 	struct intel_crtc *intel_crtc;
1127 	int active_crtc_cnt = 0;
1128 
1129 	drm_modeset_lock_all(dev);
1130 	for_each_intel_crtc(dev, intel_crtc) {
1131 		if (intel_crtc->base.state->active) {
1132 			active_crtc_cnt++;
1133 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1134 
1135 			drrs_status_per_crtc(m, dev, intel_crtc);
1136 		}
1137 	}
1138 	drm_modeset_unlock_all(dev);
1139 
1140 	if (!active_crtc_cnt)
1141 		seq_puts(m, "No active crtc found\n");
1142 
1143 	return 0;
1144 }
1145 
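/*
 * Dump the DP MST topology below every MST-capable source port.
 */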
1146 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1147 {
1148 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1149 	struct drm_device *dev = &dev_priv->drm;
1150 	struct intel_encoder *intel_encoder;
1151 	struct intel_digital_port *intel_dig_port;
1152 	struct drm_connector *connector;
1153 	struct drm_connector_list_iter conn_iter;
1154 
1155 	drm_connector_list_iter_begin(dev, &conn_iter);
1156 	drm_for_each_connector_iter(connector, &conn_iter) {
1157 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1158 			continue;
1159 
1160 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1161 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1162 			continue;
1163 
1164 		intel_dig_port = enc_to_dig_port(intel_encoder);
1165 		if (!intel_dig_port->dp.can_mst)
1166 			continue;
1167 
1168 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1169 			   intel_dig_port->base.base.base.id,
1170 			   intel_dig_port->base.base.name);
1171 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
1172 	}
1173 	drm_connector_list_iter_end(&conn_iter);
1174 
1175 	return 0;
1176 }
1177 
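/*
 * DisplayPort compliance testing hooks.  Writing "1" to the test_active
 * file arms the compliance handling for connected DP connectors; the
 * test_data and test_type files report what the sink last requested.
 * Typical use from a test harness (path is an example):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
 *   cat /sys/kernel/debug/dri/0/i915_dp_test_type
 */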
1178 static ssize_t i915_displayport_test_active_write(struct file *file,
1179 						  const char __user *ubuf,
1180 						  size_t len, loff_t *offp)
1181 {
1182 	char *input_buffer;
1183 	int status = 0;
1184 	struct drm_device *dev;
1185 	struct drm_connector *connector;
1186 	struct drm_connector_list_iter conn_iter;
1187 	struct intel_dp *intel_dp;
1188 	int val = 0;
1189 
1190 	dev = ((struct seq_file *)file->private_data)->private;
1191 
1192 	if (len == 0)
1193 		return 0;
1194 
1195 	input_buffer = memdup_user_nul(ubuf, len);
1196 	if (IS_ERR(input_buffer))
1197 		return PTR_ERR(input_buffer);
1198 
1199 	drm_dbg(&to_i915(dev)->drm,
1200 		"Copied %d bytes from user\n", (unsigned int)len);
1201 
1202 	drm_connector_list_iter_begin(dev, &conn_iter);
1203 	drm_for_each_connector_iter(connector, &conn_iter) {
1204 		struct intel_encoder *encoder;
1205 
1206 		if (connector->connector_type !=
1207 		    DRM_MODE_CONNECTOR_DisplayPort)
1208 			continue;
1209 
1210 		encoder = to_intel_encoder(connector->encoder);
1211 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1212 			continue;
1213 
1214 		if (encoder && connector->status == connector_status_connected) {
1215 			intel_dp = enc_to_intel_dp(encoder);
1216 			status = kstrtoint(input_buffer, 10, &val);
1217 			if (status < 0)
1218 				break;
1219 			drm_dbg(&to_i915(dev)->drm,
1220 				"Got %d for test active\n", val);
1221 			/* To prevent erroneous activation of the compliance
1222 			 * testing code, only accept an actual value of 1 here
1223 			 */
1224 			if (val == 1)
1225 				intel_dp->compliance.test_active = true;
1226 			else
1227 				intel_dp->compliance.test_active = false;
1228 		}
1229 	}
1230 	drm_connector_list_iter_end(&conn_iter);
1231 	kfree(input_buffer);
1232 	if (status < 0)
1233 		return status;
1234 
1235 	*offp += len;
1236 	return len;
1237 }
1238 
1239 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1240 {
1241 	struct drm_i915_private *dev_priv = m->private;
1242 	struct drm_device *dev = &dev_priv->drm;
1243 	struct drm_connector *connector;
1244 	struct drm_connector_list_iter conn_iter;
1245 	struct intel_dp *intel_dp;
1246 
1247 	drm_connector_list_iter_begin(dev, &conn_iter);
1248 	drm_for_each_connector_iter(connector, &conn_iter) {
1249 		struct intel_encoder *encoder;
1250 
1251 		if (connector->connector_type !=
1252 		    DRM_MODE_CONNECTOR_DisplayPort)
1253 			continue;
1254 
1255 		encoder = to_intel_encoder(connector->encoder);
1256 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1257 			continue;
1258 
1259 		if (encoder && connector->status == connector_status_connected) {
1260 			intel_dp = enc_to_intel_dp(encoder);
1261 			if (intel_dp->compliance.test_active)
1262 				seq_puts(m, "1");
1263 			else
1264 				seq_puts(m, "0");
1265 		} else
1266 			seq_puts(m, "0");
1267 	}
1268 	drm_connector_list_iter_end(&conn_iter);
1269 
1270 	return 0;
1271 }
1272 
1273 static int i915_displayport_test_active_open(struct inode *inode,
1274 					     struct file *file)
1275 {
1276 	return single_open(file, i915_displayport_test_active_show,
1277 			   inode->i_private);
1278 }
1279 
1280 static const struct file_operations i915_displayport_test_active_fops = {
1281 	.owner = THIS_MODULE,
1282 	.open = i915_displayport_test_active_open,
1283 	.read = seq_read,
1284 	.llseek = seq_lseek,
1285 	.release = single_release,
1286 	.write = i915_displayport_test_active_write
1287 };
1288 
1289 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1290 {
1291 	struct drm_i915_private *dev_priv = m->private;
1292 	struct drm_device *dev = &dev_priv->drm;
1293 	struct drm_connector *connector;
1294 	struct drm_connector_list_iter conn_iter;
1295 	struct intel_dp *intel_dp;
1296 
1297 	drm_connector_list_iter_begin(dev, &conn_iter);
1298 	drm_for_each_connector_iter(connector, &conn_iter) {
1299 		struct intel_encoder *encoder;
1300 
1301 		if (connector->connector_type !=
1302 		    DRM_MODE_CONNECTOR_DisplayPort)
1303 			continue;
1304 
1305 		encoder = to_intel_encoder(connector->encoder);
1306 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1307 			continue;
1308 
1309 		if (encoder && connector->status == connector_status_connected) {
1310 			intel_dp = enc_to_intel_dp(encoder);
1311 			if (intel_dp->compliance.test_type ==
1312 			    DP_TEST_LINK_EDID_READ)
1313 				seq_printf(m, "%lx",
1314 					   intel_dp->compliance.test_data.edid);
1315 			else if (intel_dp->compliance.test_type ==
1316 				 DP_TEST_LINK_VIDEO_PATTERN) {
1317 				seq_printf(m, "hdisplay: %d\n",
1318 					   intel_dp->compliance.test_data.hdisplay);
1319 				seq_printf(m, "vdisplay: %d\n",
1320 					   intel_dp->compliance.test_data.vdisplay);
1321 				seq_printf(m, "bpc: %u\n",
1322 					   intel_dp->compliance.test_data.bpc);
1323 			} else if (intel_dp->compliance.test_type ==
1324 				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1325 				seq_printf(m, "pattern: %d\n",
1326 					   intel_dp->compliance.test_data.phytest.phy_pattern);
1327 				seq_printf(m, "Number of lanes: %d\n",
1328 					   intel_dp->compliance.test_data.phytest.num_lanes);
1329 				seq_printf(m, "Link Rate: %d\n",
1330 					   intel_dp->compliance.test_data.phytest.link_rate);
1331 				seq_printf(m, "level: %02x\n",
1332 					   intel_dp->train_set[0]);
1333 			}
1334 		} else
1335 			seq_puts(m, "0");
1336 	}
1337 	drm_connector_list_iter_end(&conn_iter);
1338 
1339 	return 0;
1340 }
1341 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1342 
1343 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1344 {
1345 	struct drm_i915_private *dev_priv = m->private;
1346 	struct drm_device *dev = &dev_priv->drm;
1347 	struct drm_connector *connector;
1348 	struct drm_connector_list_iter conn_iter;
1349 	struct intel_dp *intel_dp;
1350 
1351 	drm_connector_list_iter_begin(dev, &conn_iter);
1352 	drm_for_each_connector_iter(connector, &conn_iter) {
1353 		struct intel_encoder *encoder;
1354 
1355 		if (connector->connector_type !=
1356 		    DRM_MODE_CONNECTOR_DisplayPort)
1357 			continue;
1358 
1359 		encoder = to_intel_encoder(connector->encoder);
1360 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1361 			continue;
1362 
1363 		if (encoder && connector->status == connector_status_connected) {
1364 			intel_dp = enc_to_intel_dp(encoder);
1365 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1366 		} else
1367 			seq_puts(m, "0");
1368 	}
1369 	drm_connector_list_iter_end(&conn_iter);
1370 
1371 	return 0;
1372 }
1373 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1374 
1375 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1376 {
1377 	struct drm_i915_private *dev_priv = m->private;
1378 	struct drm_device *dev = &dev_priv->drm;
1379 	int level;
1380 	int num_levels;
1381 
1382 	if (IS_CHERRYVIEW(dev_priv))
1383 		num_levels = 3;
1384 	else if (IS_VALLEYVIEW(dev_priv))
1385 		num_levels = 1;
1386 	else if (IS_G4X(dev_priv))
1387 		num_levels = 3;
1388 	else
1389 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1390 
1391 	drm_modeset_lock_all(dev);
1392 
1393 	for (level = 0; level < num_levels; level++) {
1394 		unsigned int latency = wm[level];
1395 
1396 		/*
1397 		 * - WM1+ latency values in 0.5us units
1398 		 * - latencies are in us on gen9/vlv/chv
1399 		 */
1400 		if (INTEL_GEN(dev_priv) >= 9 ||
1401 		    IS_VALLEYVIEW(dev_priv) ||
1402 		    IS_CHERRYVIEW(dev_priv) ||
1403 		    IS_G4X(dev_priv))
1404 			latency *= 10;
1405 		else if (level > 0)
1406 			latency *= 5;
1407 
1408 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1409 			   level, wm[level], latency / 10, latency % 10);
1410 	}
1411 
1412 	drm_modeset_unlock_all(dev);
1413 }
1414 
1415 static int pri_wm_latency_show(struct seq_file *m, void *data)
1416 {
1417 	struct drm_i915_private *dev_priv = m->private;
1418 	const u16 *latencies;
1419 
1420 	if (INTEL_GEN(dev_priv) >= 9)
1421 		latencies = dev_priv->wm.skl_latency;
1422 	else
1423 		latencies = dev_priv->wm.pri_latency;
1424 
1425 	wm_latency_show(m, latencies);
1426 
1427 	return 0;
1428 }
1429 
1430 static int spr_wm_latency_show(struct seq_file *m, void *data)
1431 {
1432 	struct drm_i915_private *dev_priv = m->private;
1433 	const u16 *latencies;
1434 
1435 	if (INTEL_GEN(dev_priv) >= 9)
1436 		latencies = dev_priv->wm.skl_latency;
1437 	else
1438 		latencies = dev_priv->wm.spr_latency;
1439 
1440 	wm_latency_show(m, latencies);
1441 
1442 	return 0;
1443 }
1444 
1445 static int cur_wm_latency_show(struct seq_file *m, void *data)
1446 {
1447 	struct drm_i915_private *dev_priv = m->private;
1448 	const u16 *latencies;
1449 
1450 	if (INTEL_GEN(dev_priv) >= 9)
1451 		latencies = dev_priv->wm.skl_latency;
1452 	else
1453 		latencies = dev_priv->wm.cur_latency;
1454 
1455 	wm_latency_show(m, latencies);
1456 
1457 	return 0;
1458 }
1459 
1460 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1461 {
1462 	struct drm_i915_private *dev_priv = inode->i_private;
1463 
1464 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
1465 		return -ENODEV;
1466 
1467 	return single_open(file, pri_wm_latency_show, dev_priv);
1468 }
1469 
1470 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1471 {
1472 	struct drm_i915_private *dev_priv = inode->i_private;
1473 
1474 	if (HAS_GMCH(dev_priv))
1475 		return -ENODEV;
1476 
1477 	return single_open(file, spr_wm_latency_show, dev_priv);
1478 }
1479 
1480 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1481 {
1482 	struct drm_i915_private *dev_priv = inode->i_private;
1483 
1484 	if (HAS_GMCH(dev_priv))
1485 		return -ENODEV;
1486 
1487 	return single_open(file, cur_wm_latency_show, dev_priv);
1488 }
1489 
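/*
 * The watermark latency files take one value per watermark level, space
 * separated; the write is rejected unless exactly num_levels values are
 * supplied.  A sketch for a platform with eight levels (path and values
 * are examples only):
 *
 *   echo "2 2 4 4 10 10 10 10" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */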
1490 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1491 				size_t len, loff_t *offp, u16 wm[8])
1492 {
1493 	struct seq_file *m = file->private_data;
1494 	struct drm_i915_private *dev_priv = m->private;
1495 	struct drm_device *dev = &dev_priv->drm;
1496 	u16 new[8] = { 0 };
1497 	int num_levels;
1498 	int level;
1499 	int ret;
1500 	char tmp[32];
1501 
1502 	if (IS_CHERRYVIEW(dev_priv))
1503 		num_levels = 3;
1504 	else if (IS_VALLEYVIEW(dev_priv))
1505 		num_levels = 1;
1506 	else if (IS_G4X(dev_priv))
1507 		num_levels = 3;
1508 	else
1509 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1510 
1511 	if (len >= sizeof(tmp))
1512 		return -EINVAL;
1513 
1514 	if (copy_from_user(tmp, ubuf, len))
1515 		return -EFAULT;
1516 
1517 	tmp[len] = '\0';
1518 
1519 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1520 		     &new[0], &new[1], &new[2], &new[3],
1521 		     &new[4], &new[5], &new[6], &new[7]);
1522 	if (ret != num_levels)
1523 		return -EINVAL;
1524 
1525 	drm_modeset_lock_all(dev);
1526 
1527 	for (level = 0; level < num_levels; level++)
1528 		wm[level] = new[level];
1529 
1530 	drm_modeset_unlock_all(dev);
1531 
1532 	return len;
1533 }
1534 
1535 
1536 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1537 				    size_t len, loff_t *offp)
1538 {
1539 	struct seq_file *m = file->private_data;
1540 	struct drm_i915_private *dev_priv = m->private;
1541 	u16 *latencies;
1542 
1543 	if (INTEL_GEN(dev_priv) >= 9)
1544 		latencies = dev_priv->wm.skl_latency;
1545 	else
1546 		latencies = dev_priv->wm.pri_latency;
1547 
1548 	return wm_latency_write(file, ubuf, len, offp, latencies);
1549 }
1550 
1551 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1552 				    size_t len, loff_t *offp)
1553 {
1554 	struct seq_file *m = file->private_data;
1555 	struct drm_i915_private *dev_priv = m->private;
1556 	u16 *latencies;
1557 
1558 	if (INTEL_GEN(dev_priv) >= 9)
1559 		latencies = dev_priv->wm.skl_latency;
1560 	else
1561 		latencies = dev_priv->wm.spr_latency;
1562 
1563 	return wm_latency_write(file, ubuf, len, offp, latencies);
1564 }
1565 
1566 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1567 				    size_t len, loff_t *offp)
1568 {
1569 	struct seq_file *m = file->private_data;
1570 	struct drm_i915_private *dev_priv = m->private;
1571 	u16 *latencies;
1572 
1573 	if (INTEL_GEN(dev_priv) >= 9)
1574 		latencies = dev_priv->wm.skl_latency;
1575 	else
1576 		latencies = dev_priv->wm.cur_latency;
1577 
1578 	return wm_latency_write(file, ubuf, len, offp, latencies);
1579 }
1580 
1581 static const struct file_operations i915_pri_wm_latency_fops = {
1582 	.owner = THIS_MODULE,
1583 	.open = pri_wm_latency_open,
1584 	.read = seq_read,
1585 	.llseek = seq_lseek,
1586 	.release = single_release,
1587 	.write = pri_wm_latency_write
1588 };
1589 
1590 static const struct file_operations i915_spr_wm_latency_fops = {
1591 	.owner = THIS_MODULE,
1592 	.open = spr_wm_latency_open,
1593 	.read = seq_read,
1594 	.llseek = seq_lseek,
1595 	.release = single_release,
1596 	.write = spr_wm_latency_write
1597 };
1598 
1599 static const struct file_operations i915_cur_wm_latency_fops = {
1600 	.owner = THIS_MODULE,
1601 	.open = cur_wm_latency_open,
1602 	.read = seq_read,
1603 	.llseek = seq_lseek,
1604 	.release = single_release,
1605 	.write = cur_wm_latency_write
1606 };
1607 
1608 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1609 {
1610 	struct drm_i915_private *dev_priv = m->private;
1611 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1612 
1613 	/* Synchronize with everything first in case there's been an HPD
1614 	 * storm, but we haven't finished handling it in the kernel yet
1615 	 */
1616 	intel_synchronize_irq(dev_priv);
1617 	flush_work(&dev_priv->hotplug.dig_port_work);
1618 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1619 
1620 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1621 	seq_printf(m, "Detected: %s\n",
1622 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1623 
1624 	return 0;
1625 }
1626 
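/*
 * i915_hpd_storm_ctl takes either a decimal threshold (number of HPD
 * pulses per period before a storm is assumed, 0 disables detection) or
 * "reset" to restore the default.  For example (path is an example):
 *
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */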
1627 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1628 					const char __user *ubuf, size_t len,
1629 					loff_t *offp)
1630 {
1631 	struct seq_file *m = file->private_data;
1632 	struct drm_i915_private *dev_priv = m->private;
1633 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1634 	unsigned int new_threshold;
1635 	int i;
1636 	char *newline;
1637 	char tmp[16];
1638 
1639 	if (len >= sizeof(tmp))
1640 		return -EINVAL;
1641 
1642 	if (copy_from_user(tmp, ubuf, len))
1643 		return -EFAULT;
1644 
1645 	tmp[len] = '\0';
1646 
1647 	/* Strip newline, if any */
1648 	newline = strchr(tmp, '\n');
1649 	if (newline)
1650 		*newline = '\0';
1651 
1652 	if (strcmp(tmp, "reset") == 0)
1653 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1654 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1655 		return -EINVAL;
1656 
1657 	if (new_threshold > 0)
1658 		drm_dbg_kms(&dev_priv->drm,
1659 			    "Setting HPD storm detection threshold to %d\n",
1660 			    new_threshold);
1661 	else
1662 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1663 
1664 	spin_lock_irq(&dev_priv->irq_lock);
1665 	hotplug->hpd_storm_threshold = new_threshold;
1666 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1667 	for_each_hpd_pin(i)
1668 		hotplug->stats[i].count = 0;
1669 	spin_unlock_irq(&dev_priv->irq_lock);
1670 
1671 	/* Re-enable hpd immediately if we were in an irq storm */
1672 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1673 
1674 	return len;
1675 }
1676 
1677 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1678 {
1679 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1680 }
1681 
1682 static const struct file_operations i915_hpd_storm_ctl_fops = {
1683 	.owner = THIS_MODULE,
1684 	.open = i915_hpd_storm_ctl_open,
1685 	.read = seq_read,
1686 	.llseek = seq_lseek,
1687 	.release = single_release,
1688 	.write = i915_hpd_storm_ctl_write
1689 };
1690 
1691 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1692 {
1693 	struct drm_i915_private *dev_priv = m->private;
1694 
1695 	seq_printf(m, "Enabled: %s\n",
1696 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1697 
1698 	return 0;
1699 }
1700 
1701 static int
1702 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1703 {
1704 	return single_open(file, i915_hpd_short_storm_ctl_show,
1705 			   inode->i_private);
1706 }
1707 
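/*
 * Force short-HPD storm detection on or off with a boolean, or write
 * "reset" to return to the platform default (enabled only when the device
 * has no DP-MST support).
 */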
1708 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1709 					      const char __user *ubuf,
1710 					      size_t len, loff_t *offp)
1711 {
1712 	struct seq_file *m = file->private_data;
1713 	struct drm_i915_private *dev_priv = m->private;
1714 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1715 	char *newline;
1716 	char tmp[16];
1717 	int i;
1718 	bool new_state;
1719 
1720 	if (len >= sizeof(tmp))
1721 		return -EINVAL;
1722 
1723 	if (copy_from_user(tmp, ubuf, len))
1724 		return -EFAULT;
1725 
1726 	tmp[len] = '\0';
1727 
1728 	/* Strip newline, if any */
1729 	newline = strchr(tmp, '\n');
1730 	if (newline)
1731 		*newline = '\0';
1732 
1733 	/* Reset to the "default" state for this system */
1734 	if (strcmp(tmp, "reset") == 0)
1735 		new_state = !HAS_DP_MST(dev_priv);
1736 	else if (kstrtobool(tmp, &new_state) != 0)
1737 		return -EINVAL;
1738 
1739 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1740 		    new_state ? "En" : "Dis");
1741 
1742 	spin_lock_irq(&dev_priv->irq_lock);
1743 	hotplug->hpd_short_storm_enabled = new_state;
1744 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1745 	for_each_hpd_pin(i)
1746 		hotplug->stats[i].count = 0;
1747 	spin_unlock_irq(&dev_priv->irq_lock);
1748 
1749 	/* Re-enable hpd immediately if we were in an irq storm */
1750 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1751 
1752 	return len;
1753 }
1754 
1755 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1756 	.owner = THIS_MODULE,
1757 	.open = i915_hpd_short_storm_ctl_open,
1758 	.read = seq_read,
1759 	.llseek = seq_lseek,
1760 	.release = single_release,
1761 	.write = i915_hpd_short_storm_ctl_write,
1762 };
1763 
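/*
 * Manual DRRS override for testing: a non-zero write enables DRRS on every
 * active pipe whose eDP output supports it, zero disables it again.  For
 * example (path is an example):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */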
1764 static int i915_drrs_ctl_set(void *data, u64 val)
1765 {
1766 	struct drm_i915_private *dev_priv = data;
1767 	struct drm_device *dev = &dev_priv->drm;
1768 	struct intel_crtc *crtc;
1769 
1770 	if (INTEL_GEN(dev_priv) < 7)
1771 		return -ENODEV;
1772 
1773 	for_each_intel_crtc(dev, crtc) {
1774 		struct drm_connector_list_iter conn_iter;
1775 		struct intel_crtc_state *crtc_state;
1776 		struct drm_connector *connector;
1777 		struct drm_crtc_commit *commit;
1778 		int ret;
1779 
1780 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
1781 		if (ret)
1782 			return ret;
1783 
1784 		crtc_state = to_intel_crtc_state(crtc->base.state);
1785 
1786 		if (!crtc_state->hw.active ||
1787 		    !crtc_state->has_drrs)
1788 			goto out;
1789 
1790 		commit = crtc_state->uapi.commit;
1791 		if (commit) {
1792 			ret = wait_for_completion_interruptible(&commit->hw_done);
1793 			if (ret)
1794 				goto out;
1795 		}
1796 
1797 		drm_connector_list_iter_begin(dev, &conn_iter);
1798 		drm_for_each_connector_iter(connector, &conn_iter) {
1799 			struct intel_encoder *encoder;
1800 			struct intel_dp *intel_dp;
1801 
1802 			if (!(crtc_state->uapi.connector_mask &
1803 			      drm_connector_mask(connector)))
1804 				continue;
1805 
1806 			encoder = intel_attached_encoder(to_intel_connector(connector));
1807 			if (encoder->type != INTEL_OUTPUT_EDP)
1808 				continue;
1809 
1810 			drm_dbg(&dev_priv->drm,
1811 				"Manually %sabling DRRS. %llu\n",
1812 				val ? "en" : "dis", val);
1813 
1814 			intel_dp = enc_to_intel_dp(encoder);
1815 			if (val)
1816 				intel_edp_drrs_enable(intel_dp,
1817 						      crtc_state);
1818 			else
1819 				intel_edp_drrs_disable(intel_dp,
1820 						       crtc_state);
1821 		}
1822 		drm_connector_list_iter_end(&conn_iter);
1823 
1824 out:
1825 		drm_modeset_unlock(&crtc->base.mutex);
1826 		if (ret)
1827 			return ret;
1828 	}
1829 
1830 	return 0;
1831 }
1832 
1833 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
1834 
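/*
 * Writing a truthy value waits for any pending commit on each CRTC, re-arms
 * FIFO underrun reporting on every active pipe and finally resets the FBC
 * underrun state.
 */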
1835 static ssize_t
1836 i915_fifo_underrun_reset_write(struct file *filp,
1837 			       const char __user *ubuf,
1838 			       size_t cnt, loff_t *ppos)
1839 {
1840 	struct drm_i915_private *dev_priv = filp->private_data;
1841 	struct intel_crtc *intel_crtc;
1842 	struct drm_device *dev = &dev_priv->drm;
1843 	int ret;
1844 	bool reset;
1845 
1846 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
1847 	if (ret)
1848 		return ret;
1849 
1850 	if (!reset)
1851 		return cnt;
1852 
1853 	for_each_intel_crtc(dev, intel_crtc) {
1854 		struct drm_crtc_commit *commit;
1855 		struct intel_crtc_state *crtc_state;
1856 
1857 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
1858 		if (ret)
1859 			return ret;
1860 
1861 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
1862 		commit = crtc_state->uapi.commit;
1863 		if (commit) {
1864 			ret = wait_for_completion_interruptible(&commit->hw_done);
1865 			if (!ret)
1866 				ret = wait_for_completion_interruptible(&commit->flip_done);
1867 		}
1868 
1869 		if (!ret && crtc_state->hw.active) {
1870 			drm_dbg_kms(&dev_priv->drm,
1871 				    "Re-arming FIFO underruns on pipe %c\n",
1872 				    pipe_name(intel_crtc->pipe));
1873 
1874 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
1875 		}
1876 
1877 		drm_modeset_unlock(&intel_crtc->base.mutex);
1878 
1879 		if (ret)
1880 			return ret;
1881 	}
1882 
1883 	ret = intel_fbc_reset_underrun(dev_priv);
1884 	if (ret)
1885 		return ret;
1886 
1887 	return cnt;
1888 }
1889 
1890 static const struct file_operations i915_fifo_underrun_reset_ops = {
1891 	.owner = THIS_MODULE,
1892 	.open = simple_open,
1893 	.write = i915_fifo_underrun_reset_write,
1894 	.llseek = default_llseek,
1895 };
1896 
1897 static const struct drm_info_list intel_display_debugfs_list[] = {
1898 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
1899 	{"i915_fbc_status", i915_fbc_status, 0},
1900 	{"i915_ips_status", i915_ips_status, 0},
1901 	{"i915_sr_status", i915_sr_status, 0},
1902 	{"i915_opregion", i915_opregion, 0},
1903 	{"i915_vbt", i915_vbt, 0},
1904 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1905 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
1906 	{"i915_power_domain_info", i915_power_domain_info, 0},
1907 	{"i915_dmc_info", i915_dmc_info, 0},
1908 	{"i915_display_info", i915_display_info, 0},
1909 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
1910 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
1911 	{"i915_ddb_info", i915_ddb_info, 0},
1912 	{"i915_drrs_status", i915_drrs_status, 0},
1913 };
1914 
1915 static const struct {
1916 	const char *name;
1917 	const struct file_operations *fops;
1918 } intel_display_debugfs_files[] = {
1919 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
1920 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
1921 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
1922 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
1923 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
1924 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
1925 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
1926 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
1927 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
1928 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
1929 	{"i915_ipc_status", &i915_ipc_status_fops},
1930 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
1931 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
1932 };
1933 
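/**
 * intel_display_debugfs_register - add display-specific debugfs files
 * @i915: i915 device instance
 *
 * Registers the read-only display info nodes and the writable control files
 * under the primary DRM minor's debugfs root.
 */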
1934 void intel_display_debugfs_register(struct drm_i915_private *i915)
1935 {
1936 	struct drm_minor *minor = i915->drm.primary;
1937 	int i;
1938 
1939 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
1940 		debugfs_create_file(intel_display_debugfs_files[i].name,
				    0644,
1942 				    minor->debugfs_root,
1943 				    to_i915(minor->dev),
1944 				    intel_display_debugfs_files[i].fops);
1945 	}
1946 
1947 	drm_debugfs_create_files(intel_display_debugfs_list,
1948 				 ARRAY_SIZE(intel_display_debugfs_list),
1949 				 minor->debugfs_root, minor);
1950 }
1951 
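/* Dump the eDP panel power sequencing and backlight delays. */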
1952 static int i915_panel_show(struct seq_file *m, void *data)
1953 {
1954 	struct drm_connector *connector = m->private;
1955 	struct intel_dp *intel_dp =
1956 		intel_attached_dp(to_intel_connector(connector));
1957 
1958 	if (connector->status != connector_status_connected)
1959 		return -ENODEV;
1960 
1961 	seq_printf(m, "Panel power up delay: %d\n",
1962 		   intel_dp->panel_power_up_delay);
1963 	seq_printf(m, "Panel power down delay: %d\n",
1964 		   intel_dp->panel_power_down_delay);
1965 	seq_printf(m, "Backlight on delay: %d\n",
1966 		   intel_dp->backlight_on_delay);
1967 	seq_printf(m, "Backlight off delay: %d\n",
1968 		   intel_dp->backlight_off_delay);
1969 
1970 	return 0;
1971 }
1972 DEFINE_SHOW_ATTRIBUTE(i915_panel);
1973 
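/* Report the HDCP capability of the currently connected sink, if any. */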
1974 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
1975 {
1976 	struct drm_connector *connector = m->private;
1977 	struct intel_connector *intel_connector = to_intel_connector(connector);
1978 
1979 	if (connector->status != connector_status_connected)
1980 		return -ENODEV;
1981 
	/* HDCP is supported only on connectors that provide an HDCP shim */
1983 	if (!intel_connector->hdcp.shim)
1984 		return -EINVAL;
1985 
1986 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
1987 		   connector->base.id);
1988 	intel_hdcp_info(m, intel_connector);
1989 
1990 	return 0;
1991 }
1992 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
1993 
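/*
 * Report the DSC and FEC capabilities for the connector. The connection and
 * CRTC locks are taken with a full acquire context so that an -EDEADLK from
 * either lock can be handled by backing off and retrying.
 */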
1994 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
1995 {
1996 	struct drm_connector *connector = m->private;
1997 	struct drm_device *dev = connector->dev;
1998 	struct drm_crtc *crtc;
1999 	struct intel_dp *intel_dp;
2000 	struct drm_modeset_acquire_ctx ctx;
2001 	struct intel_crtc_state *crtc_state = NULL;
2002 	int ret = 0;
2003 	bool try_again = false;
2004 
2005 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2006 
2007 	do {
2008 		try_again = false;
2009 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2010 				       &ctx);
2011 		if (ret) {
2012 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2013 				try_again = true;
2014 				continue;
2015 			}
2016 			break;
2017 		}
2018 		crtc = connector->state->crtc;
2019 		if (connector->status != connector_status_connected || !crtc) {
2020 			ret = -ENODEV;
2021 			break;
2022 		}
2023 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2024 		if (ret == -EDEADLK) {
2025 			ret = drm_modeset_backoff(&ctx);
2026 			if (!ret) {
2027 				try_again = true;
2028 				continue;
2029 			}
2030 			break;
2031 		} else if (ret) {
2032 			break;
2033 		}
2034 		intel_dp = intel_attached_dp(to_intel_connector(connector));
2035 		crtc_state = to_intel_crtc_state(crtc->state);
2036 		seq_printf(m, "DSC_Enabled: %s\n",
2037 			   yesno(crtc_state->dsc.compression_enable));
2038 		seq_printf(m, "DSC_Sink_Support: %s\n",
2039 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2040 		seq_printf(m, "Force_DSC_Enable: %s\n",
2041 			   yesno(intel_dp->force_dsc_en));
2042 		if (!intel_dp_is_edp(intel_dp))
2043 			seq_printf(m, "FEC_Sink_Support: %s\n",
2044 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2045 	} while (try_again);
2046 
2047 	drm_modeset_drop_locks(&ctx);
2048 	drm_modeset_acquire_fini(&ctx);
2049 
2050 	return ret;
2051 }
2052 
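/*
 * Writing a boolean updates intel_dp->force_dsc_en; the value is consulted
 * the next time the encoder configuration is computed.
 */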
2053 static ssize_t i915_dsc_fec_support_write(struct file *file,
2054 					  const char __user *ubuf,
2055 					  size_t len, loff_t *offp)
2056 {
2057 	bool dsc_enable = false;
2058 	int ret;
2059 	struct drm_connector *connector =
2060 		((struct seq_file *)file->private_data)->private;
2061 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2062 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2063 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2064 
2065 	if (len == 0)
2066 		return 0;
2067 
	drm_dbg(&i915->drm,
		"Copying %zu bytes from user to force DSC\n", len);
2070 
2071 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2072 	if (ret < 0)
2073 		return ret;
2074 
	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
		dsc_enable ? "true" : "false");
2077 	intel_dp->force_dsc_en = dsc_enable;
2078 
2079 	*offp += len;
2080 	return len;
2081 }
2082 
2083 static int i915_dsc_fec_support_open(struct inode *inode,
2084 				     struct file *file)
2085 {
2086 	return single_open(file, i915_dsc_fec_support_show,
2087 			   inode->i_private);
2088 }
2089 
2090 static const struct file_operations i915_dsc_fec_support_fops = {
2091 	.owner = THIS_MODULE,
2092 	.open = i915_dsc_fec_support_open,
2093 	.read = seq_read,
2094 	.llseek = seq_lseek,
2095 	.release = single_release,
	.write = i915_dsc_fec_support_write,
2097 };
2098 
2099 /**
2100  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2101  * @connector: pointer to a registered drm_connector
2102  *
2103  * Cleanup will be done by drm_connector_unregister() through a call to
2104  * drm_debugfs_connector_remove().
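 *
 * A minimal usage sketch, assuming the connector has already been through
 * drm_connector_register() (the actual call site in the driver may differ):
 *
 *	if (intel_connector_debugfs_add(connector))
 *		drm_dbg_kms(connector->dev, "connector debugfs files not added\n");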
2105  *
2106  * Returns 0 on success, negative error codes on error.
2107  */
2108 int intel_connector_debugfs_add(struct drm_connector *connector)
2109 {
2110 	struct dentry *root = connector->debugfs_entry;
2111 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2112 
	/* The connector must have been registered beforehand. */
2114 	if (!root)
2115 		return -ENODEV;
2116 
2117 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", 0444, root,
2119 				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", 0444, root,
2121 				    connector, &i915_psr_sink_status_fops);
2122 	}
2123 
2124 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2125 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2126 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
2128 				    connector, &i915_hdcp_sink_capability_fops);
2129 	}
2130 
2131 	if (INTEL_GEN(dev_priv) >= 10 &&
2132 	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2133 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", 0444, root,
2135 				    connector, &i915_dsc_fec_support_fops);
2136 
2137 	return 0;
2138 }
2139