1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_display_types.h"
14 #include "intel_dp.h"
15 #include "intel_fbc.h"
16 #include "intel_hdcp.h"
17 #include "intel_hdmi.h"
18 #include "intel_pm.h"
19 #include "intel_psr.h"
20 #include "intel_sideband.h"
21 
22 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
23 {
24 	return to_i915(node->minor->dev);
25 }
26 
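/*
 * Report the frontbuffer tracking state: the busy_bits and flip_bits masks
 * that note which frontbuffers still have an unflushed write or a pending
 * flip.
 */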
27 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
28 {
29 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
30 
31 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
32 		   dev_priv->fb_tracking.busy_bits);
33 
34 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
35 		   dev_priv->fb_tracking.flip_bits);
36 
37 	return 0;
38 }
39 
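/*
 * Report FBC state: whether FBC is currently active, the reason it is
 * disabled if not, and (when active) whether the hardware reports any
 * compressed/compressing segments.
 */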
40 static int i915_fbc_status(struct seq_file *m, void *unused)
41 {
42 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 	struct intel_fbc *fbc = &dev_priv->fbc;
44 	intel_wakeref_t wakeref;
45 
46 	if (!HAS_FBC(dev_priv))
47 		return -ENODEV;
48 
49 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
50 	mutex_lock(&fbc->lock);
51 
52 	if (intel_fbc_is_active(dev_priv))
53 		seq_puts(m, "FBC enabled\n");
54 	else
55 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
56 
57 	if (intel_fbc_is_active(dev_priv)) {
58 		u32 mask;
59 
60 		if (INTEL_GEN(dev_priv) >= 8)
61 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
62 		else if (INTEL_GEN(dev_priv) >= 7)
63 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
64 		else if (INTEL_GEN(dev_priv) >= 5)
65 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
66 		else if (IS_G4X(dev_priv))
67 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
68 		else
69 			mask = intel_de_read(dev_priv, FBC_STATUS) &
70 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
71 
72 		seq_printf(m, "Compressing: %s\n", yesno(mask));
73 	}
74 
75 	mutex_unlock(&fbc->lock);
76 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
77 
78 	return 0;
79 }
80 
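/*
 * FBC false-color debugfs knob (gen7+ only): reading returns the current
 * setting, writing a non-zero value sets FBC_CTL_FALSE_COLOR in
 * ILK_DPFC_CONTROL. Typically exposed as i915_fbc_false_color under the
 * card's debugfs directory, e.g. (path assumed):
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 */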
81 static int i915_fbc_false_color_get(void *data, u64 *val)
82 {
83 	struct drm_i915_private *dev_priv = data;
84 
85 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
86 		return -ENODEV;
87 
88 	*val = dev_priv->fbc.false_color;
89 
90 	return 0;
91 }
92 
93 static int i915_fbc_false_color_set(void *data, u64 val)
94 {
95 	struct drm_i915_private *dev_priv = data;
96 	u32 reg;
97 
98 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
99 		return -ENODEV;
100 
101 	mutex_lock(&dev_priv->fbc.lock);
102 
103 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
104 	dev_priv->fbc.false_color = val;
105 
106 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
107 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
108 
109 	mutex_unlock(&dev_priv->fbc.lock);
110 	return 0;
111 }
112 
113 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
114 			i915_fbc_false_color_get, i915_fbc_false_color_set,
115 			"%llu\n");
116 
117 static int i915_ips_status(struct seq_file *m, void *unused)
118 {
119 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
120 	intel_wakeref_t wakeref;
121 
122 	if (!HAS_IPS(dev_priv))
123 		return -ENODEV;
124 
125 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
126 
127 	seq_printf(m, "Enabled by kernel parameter: %s\n",
128 		   yesno(dev_priv->params.enable_ips));
129 
130 	if (INTEL_GEN(dev_priv) >= 8) {
131 		seq_puts(m, "Currently: unknown\n");
132 	} else {
133 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
134 			seq_puts(m, "Currently: enabled\n");
135 		else
136 			seq_puts(m, "Currently: disabled\n");
137 	}
138 
139 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
140 
141 	return 0;
142 }
143 
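/*
 * Report whether legacy (pre-gen9) self-refresh is enabled, using the
 * platform-specific status register; gen9+ has no single global SR bit.
 */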
144 static int i915_sr_status(struct seq_file *m, void *unused)
145 {
146 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
147 	intel_wakeref_t wakeref;
148 	bool sr_enabled = false;
149 
150 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
151 
152 	if (INTEL_GEN(dev_priv) >= 9)
153 		/* no global SR status; inspect per-plane WM */;
154 	else if (HAS_PCH_SPLIT(dev_priv))
155 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
156 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
157 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
158 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
159 	else if (IS_I915GM(dev_priv))
160 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
161 	else if (IS_PINEVIEW(dev_priv))
162 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
163 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
164 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
165 
166 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
167 
168 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
169 
170 	return 0;
171 }
172 
173 static int i915_opregion(struct seq_file *m, void *unused)
174 {
175 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
176 
177 	if (opregion->header)
178 		seq_write(m, opregion->header, OPREGION_SIZE);
179 
180 	return 0;
181 }
182 
183 static int i915_vbt(struct seq_file *m, void *unused)
184 {
185 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
186 
187 	if (opregion->vbt)
188 		seq_write(m, opregion->vbt, opregion->vbt_size);
189 
190 	return 0;
191 }
192 
193 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
194 {
195 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
196 	struct drm_device *dev = &dev_priv->drm;
197 	struct intel_framebuffer *fbdev_fb = NULL;
198 	struct drm_framebuffer *drm_fb;
199 
200 #ifdef CONFIG_DRM_FBDEV_EMULATION
201 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
202 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
203 
204 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
205 			   fbdev_fb->base.width,
206 			   fbdev_fb->base.height,
207 			   fbdev_fb->base.format->depth,
208 			   fbdev_fb->base.format->cpp[0] * 8,
209 			   fbdev_fb->base.modifier,
210 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
211 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
212 		seq_putc(m, '\n');
213 	}
214 #endif
215 
216 	mutex_lock(&dev->mode_config.fb_lock);
217 	drm_for_each_fb(drm_fb, dev) {
218 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
219 		if (fb == fbdev_fb)
220 			continue;
221 
222 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
223 			   fb->base.width,
224 			   fb->base.height,
225 			   fb->base.format->depth,
226 			   fb->base.format->cpp[0] * 8,
227 			   fb->base.modifier,
228 			   drm_framebuffer_read_refcount(&fb->base));
229 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
230 		seq_putc(m, '\n');
231 	}
232 	mutex_unlock(&dev->mode_config.fb_lock);
233 
234 	return 0;
235 }
236 
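/*
 * Query the PSR sink state by reading the DP_PSR_STATUS DPCD register from
 * the connected eDP sink and decode it into a human-readable string.
 */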
237 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
238 {
239 	u8 val;
240 	static const char * const sink_status[] = {
241 		"inactive",
242 		"transition to active, capture and display",
243 		"active, display from RFB",
244 		"active, capture and display on sink device timings",
245 		"transition to inactive, capture and display, timing re-sync",
246 		"reserved",
247 		"reserved",
248 		"sink internal error",
249 	};
250 	struct drm_connector *connector = m->private;
251 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
252 	struct intel_dp *intel_dp =
253 		intel_attached_dp(to_intel_connector(connector));
254 	int ret;
255 
256 	if (!CAN_PSR(dev_priv)) {
257 		seq_puts(m, "PSR Unsupported\n");
258 		return -ENODEV;
259 	}
260 
261 	if (connector->status != connector_status_connected)
262 		return -ENODEV;
263 
264 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
265 
266 	if (ret == 1) {
267 		const char *str = "unknown";
268 
269 		val &= DP_PSR_SINK_STATE_MASK;
270 		if (val < ARRAY_SIZE(sink_status))
271 			str = sink_status[val];
272 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
273 	} else {
274 		return ret;
275 	}
276 
277 	return 0;
278 }
279 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
280 
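/*
 * Decode the source-side PSR hardware state from EDP_PSR_STATUS or
 * EDP_PSR2_STATUS, depending on which PSR version is enabled.
 */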
281 static void
282 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
283 {
284 	u32 val, status_val;
285 	const char *status = "unknown";
286 
287 	if (dev_priv->psr.psr2_enabled) {
288 		static const char * const live_status[] = {
289 			"IDLE",
290 			"CAPTURE",
291 			"CAPTURE_FS",
292 			"SLEEP",
293 			"BUFON_FW",
294 			"ML_UP",
295 			"SU_STANDBY",
296 			"FAST_SLEEP",
297 			"DEEP_SLEEP",
298 			"BUF_ON",
299 			"TG_ON"
300 		};
301 		val = intel_de_read(dev_priv,
302 				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
303 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
304 			      EDP_PSR2_STATUS_STATE_SHIFT;
305 		if (status_val < ARRAY_SIZE(live_status))
306 			status = live_status[status_val];
307 	} else {
308 		static const char * const live_status[] = {
309 			"IDLE",
310 			"SRDONACK",
311 			"SRDENT",
312 			"BUFOFF",
313 			"BUFON",
314 			"AUXACK",
315 			"SRDOFFACK",
316 			"SRDENT_ON",
317 		};
318 		val = intel_de_read(dev_priv,
319 				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
320 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
321 			      EDP_PSR_STATUS_STATE_SHIFT;
322 		if (status_val < ARRAY_SIZE(live_status))
323 			status = live_status[status_val];
324 	}
325 
326 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
327 }
328 
329 static int i915_edp_psr_status(struct seq_file *m, void *data)
330 {
331 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
332 	struct i915_psr *psr = &dev_priv->psr;
333 	intel_wakeref_t wakeref;
334 	const char *status;
335 	bool enabled;
336 	u32 val;
337 
338 	if (!HAS_PSR(dev_priv))
339 		return -ENODEV;
340 
341 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
342 	if (psr->dp)
343 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
344 	seq_puts(m, "\n");
345 
346 	if (!psr->sink_support)
347 		return 0;
348 
349 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
350 	mutex_lock(&psr->lock);
351 
352 	if (psr->enabled)
353 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
354 	else
355 		status = "disabled";
356 	seq_printf(m, "PSR mode: %s\n", status);
357 
358 	if (!psr->enabled) {
359 		seq_printf(m, "PSR sink not reliable: %s\n",
360 			   yesno(psr->sink_not_reliable));
361 
362 		goto unlock;
363 	}
364 
365 	if (psr->psr2_enabled) {
366 		val = intel_de_read(dev_priv,
367 				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
368 		enabled = val & EDP_PSR2_ENABLE;
369 	} else {
370 		val = intel_de_read(dev_priv,
371 				    EDP_PSR_CTL(dev_priv->psr.transcoder));
372 		enabled = val & EDP_PSR_ENABLE;
373 	}
374 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
375 		   enableddisabled(enabled), val);
376 	psr_source_status(dev_priv, m);
377 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
378 		   psr->busy_frontbuffer_bits);
379 
380 	/*
381 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
382 	 */
383 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
384 		val = intel_de_read(dev_priv,
385 				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
386 		val &= EDP_PSR_PERF_CNT_MASK;
387 		seq_printf(m, "Performance counter: %u\n", val);
388 	}
389 
390 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
391 		seq_printf(m, "Last attempted entry at: %lld\n",
392 			   psr->last_entry_attempt);
393 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
394 	}
395 
396 	if (psr->psr2_enabled) {
397 		u32 su_frames_val[3];
398 		int frame;
399 
400 		/*
401 		 * Read all 3 registers beforehand to minimize the chance of
402 		 * crossing a frame boundary between register reads
403 		 */
404 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
405 			val = intel_de_read(dev_priv,
406 					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
407 			su_frames_val[frame / 3] = val;
408 		}
409 
410 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
411 
412 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
413 			u32 su_blocks;
414 
415 			su_blocks = su_frames_val[frame / 3] &
416 				    PSR2_SU_STATUS_MASK(frame);
417 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
418 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
419 		}
420 
421 		seq_printf(m, "PSR2 selective fetch: %s\n",
422 			   enableddisabled(psr->psr2_sel_fetch_enabled));
423 	}
424 
425 unlock:
426 	mutex_unlock(&psr->lock);
427 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
428 
429 	return 0;
430 }
431 
432 static int
433 i915_edp_psr_debug_set(void *data, u64 val)
434 {
435 	struct drm_i915_private *dev_priv = data;
436 	intel_wakeref_t wakeref;
437 	int ret;
438 
439 	if (!CAN_PSR(dev_priv))
440 		return -ENODEV;
441 
442 	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
443 
444 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
445 
446 	ret = intel_psr_debug_set(dev_priv, val);
447 
448 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
449 
450 	return ret;
451 }
452 
453 static int
454 i915_edp_psr_debug_get(void *data, u64 *val)
455 {
456 	struct drm_i915_private *dev_priv = data;
457 
458 	if (!CAN_PSR(dev_priv))
459 		return -ENODEV;
460 
461 	*val = READ_ONCE(dev_priv->psr.debug);
462 	return 0;
463 }
464 
465 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
466 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
467 			"%llu\n");
468 
469 static int i915_power_domain_info(struct seq_file *m, void *unused)
470 {
471 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
472 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
473 	int i;
474 
475 	mutex_lock(&power_domains->lock);
476 
477 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
478 	for (i = 0; i < power_domains->power_well_count; i++) {
479 		struct i915_power_well *power_well;
480 		enum intel_display_power_domain power_domain;
481 
482 		power_well = &power_domains->power_wells[i];
483 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
484 			   power_well->count);
485 
486 		for_each_power_domain(power_domain, power_well->desc->domains)
487 			seq_printf(m, "  %-23s %d\n",
488 				 intel_display_power_domain_str(power_domain),
489 				 power_domains->domain_use_count[power_domain]);
490 	}
491 
492 	mutex_unlock(&power_domains->lock);
493 
494 	return 0;
495 }
496 
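/*
 * Report DMC (CSR) firmware status: whether the firmware is loaded, its
 * version, and the DC-state entry counters maintained by the firmware.
 */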
497 static int i915_dmc_info(struct seq_file *m, void *unused)
498 {
499 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
500 	intel_wakeref_t wakeref;
501 	struct intel_csr *csr;
502 	i915_reg_t dc5_reg, dc6_reg = {};
503 
504 	if (!HAS_CSR(dev_priv))
505 		return -ENODEV;
506 
507 	csr = &dev_priv->csr;
508 
509 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
510 
511 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
512 	seq_printf(m, "path: %s\n", csr->fw_path);
513 
514 	if (!csr->dmc_payload)
515 		goto out;
516 
517 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
518 		   CSR_VERSION_MINOR(csr->version));
519 
520 	if (INTEL_GEN(dev_priv) >= 12) {
521 		if (IS_DGFX(dev_priv)) {
522 			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
523 		} else {
524 			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
525 			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
526 		}
527 
528 		/*
529 		 * NOTE: DMC_DEBUG3 is a general purpose reg.
530 		 * According to B.Specs:49196 the DMC f/w reuses the DC5/6
531 		 * counter reg for DC3CO debugging and validation, but the
532 		 * TGL DMC f/w uses the DMC_DEBUG3 reg for the DC3CO counter.
533 		 */
534 		seq_printf(m, "DC3CO count: %d\n",
535 			   intel_de_read(dev_priv, DMC_DEBUG3));
536 	} else {
537 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
538 						 SKL_CSR_DC3_DC5_COUNT;
539 		if (!IS_GEN9_LP(dev_priv))
540 			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
541 	}
542 
543 	seq_printf(m, "DC3 -> DC5 count: %d\n",
544 		   intel_de_read(dev_priv, dc5_reg));
545 	if (dc6_reg.reg)
546 		seq_printf(m, "DC5 -> DC6 count: %d\n",
547 			   intel_de_read(dev_priv, dc6_reg));
548 
549 out:
550 	seq_printf(m, "program base: 0x%08x\n",
551 		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
552 	seq_printf(m, "ssp base: 0x%08x\n",
553 		   intel_de_read(dev_priv, CSR_SSP_BASE));
554 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
555 
556 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
557 
558 	return 0;
559 }
560 
561 static void intel_seq_print_mode(struct seq_file *m, int tabs,
562 				 const struct drm_display_mode *mode)
563 {
564 	int i;
565 
566 	for (i = 0; i < tabs; i++)
567 		seq_putc(m, '\t');
568 
569 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
570 }
571 
572 static void intel_encoder_info(struct seq_file *m,
573 			       struct intel_crtc *crtc,
574 			       struct intel_encoder *encoder)
575 {
576 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
577 	struct drm_connector_list_iter conn_iter;
578 	struct drm_connector *connector;
579 
580 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
581 		   encoder->base.base.id, encoder->base.name);
582 
583 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
584 	drm_for_each_connector_iter(connector, &conn_iter) {
585 		const struct drm_connector_state *conn_state =
586 			connector->state;
587 
588 		if (conn_state->best_encoder != &encoder->base)
589 			continue;
590 
591 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
592 			   connector->base.id, connector->name);
593 	}
594 	drm_connector_list_iter_end(&conn_iter);
595 }
596 
597 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
598 {
599 	const struct drm_display_mode *mode = panel->fixed_mode;
600 
601 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
602 }
603 
604 static void intel_hdcp_info(struct seq_file *m,
605 			    struct intel_connector *intel_connector)
606 {
607 	bool hdcp_cap, hdcp2_cap;
608 
609 	if (!intel_connector->hdcp.shim) {
610 		seq_puts(m, "No Connector Support");
611 		goto out;
612 	}
613 
614 	hdcp_cap = intel_hdcp_capable(intel_connector);
615 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
616 
617 	if (hdcp_cap)
618 		seq_puts(m, "HDCP1.4 ");
619 	if (hdcp2_cap)
620 		seq_puts(m, "HDCP2.2 ");
621 
622 	if (!hdcp_cap && !hdcp2_cap)
623 		seq_puts(m, "None");
624 
625 out:
626 	seq_puts(m, "\n");
627 }
628 
629 static void intel_dp_info(struct seq_file *m,
630 			  struct intel_connector *intel_connector)
631 {
632 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
633 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
634 	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
635 
636 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
637 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
638 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
639 		intel_panel_info(m, &intel_connector->panel);
640 
641 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
642 				edid ? edid->data : NULL, &intel_dp->aux);
643 }
644 
645 static void intel_dp_mst_info(struct seq_file *m,
646 			      struct intel_connector *intel_connector)
647 {
648 	bool has_audio = intel_connector->port->has_audio;
649 
650 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
651 }
652 
653 static void intel_hdmi_info(struct seq_file *m,
654 			    struct intel_connector *intel_connector)
655 {
656 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
657 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
658 
659 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
660 }
661 
662 static void intel_lvds_info(struct seq_file *m,
663 			    struct intel_connector *intel_connector)
664 {
665 	intel_panel_info(m, &intel_connector->panel);
666 }
667 
668 static void intel_connector_info(struct seq_file *m,
669 				 struct drm_connector *connector)
670 {
671 	struct intel_connector *intel_connector = to_intel_connector(connector);
672 	const struct drm_connector_state *conn_state = connector->state;
673 	struct intel_encoder *encoder =
674 		to_intel_encoder(conn_state->best_encoder);
675 	const struct drm_display_mode *mode;
676 
677 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
678 		   connector->base.id, connector->name,
679 		   drm_get_connector_status_name(connector->status));
680 
681 	if (connector->status == connector_status_disconnected)
682 		return;
683 
684 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
685 		   connector->display_info.width_mm,
686 		   connector->display_info.height_mm);
687 	seq_printf(m, "\tsubpixel order: %s\n",
688 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
689 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
690 
691 	if (!encoder)
692 		return;
693 
694 	switch (connector->connector_type) {
695 	case DRM_MODE_CONNECTOR_DisplayPort:
696 	case DRM_MODE_CONNECTOR_eDP:
697 		if (encoder->type == INTEL_OUTPUT_DP_MST)
698 			intel_dp_mst_info(m, intel_connector);
699 		else
700 			intel_dp_info(m, intel_connector);
701 		break;
702 	case DRM_MODE_CONNECTOR_LVDS:
703 		if (encoder->type == INTEL_OUTPUT_LVDS)
704 			intel_lvds_info(m, intel_connector);
705 		break;
706 	case DRM_MODE_CONNECTOR_HDMIA:
707 		if (encoder->type == INTEL_OUTPUT_HDMI ||
708 		    encoder->type == INTEL_OUTPUT_DDI)
709 			intel_hdmi_info(m, intel_connector);
710 		break;
711 	default:
712 		break;
713 	}
714 
715 	seq_puts(m, "\tHDCP version: ");
716 	intel_hdcp_info(m, intel_connector);
717 
718 	seq_puts(m, "\tmodes:\n");
719 	list_for_each_entry(mode, &connector->modes, head)
720 		intel_seq_print_mode(m, 2, mode);
721 }
722 
723 static const char *plane_type(enum drm_plane_type type)
724 {
725 	switch (type) {
726 	case DRM_PLANE_TYPE_OVERLAY:
727 		return "OVL";
728 	case DRM_PLANE_TYPE_PRIMARY:
729 		return "PRI";
730 	case DRM_PLANE_TYPE_CURSOR:
731 		return "CUR";
732 	/*
733 	 * Deliberately omitting default: to generate compiler warnings
734 	 * when a new drm_plane_type gets added.
735 	 */
736 	}
737 
738 	return "unknown";
739 }
740 
741 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
742 {
743 	/*
744 	 * According to the doc only one DRM_MODE_ROTATE_ value is allowed,
745 	 * but print them all so that misused values are easy to spot
746 	 */
747 	snprintf(buf, bufsize,
748 		 "%s%s%s%s%s%s(0x%08x)",
749 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
750 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
751 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
752 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
753 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
754 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
755 		 rotation);
756 }
757 
758 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
759 {
760 	const struct intel_plane_state *plane_state =
761 		to_intel_plane_state(plane->base.state);
762 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
763 	struct drm_format_name_buf format_name;
764 	struct drm_rect src, dst;
765 	char rot_str[48];
766 
767 	src = drm_plane_state_src(&plane_state->uapi);
768 	dst = drm_plane_state_dest(&plane_state->uapi);
769 
770 	if (fb)
771 		drm_get_format_name(fb->format->format, &format_name);
772 
773 	plane_rotation(rot_str, sizeof(rot_str),
774 		       plane_state->uapi.rotation);
775 
776 	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
777 		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
778 		   fb ? fb->width : 0, fb ? fb->height : 0,
779 		   DRM_RECT_FP_ARG(&src),
780 		   DRM_RECT_ARG(&dst),
781 		   rot_str);
782 }
783 
784 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
785 {
786 	const struct intel_plane_state *plane_state =
787 		to_intel_plane_state(plane->base.state);
788 	const struct drm_framebuffer *fb = plane_state->hw.fb;
789 	struct drm_format_name_buf format_name;
790 	char rot_str[48];
791 
792 	if (!fb)
793 		return;
794 
795 	drm_get_format_name(fb->format->format, &format_name);
796 
797 	plane_rotation(rot_str, sizeof(rot_str),
798 		       plane_state->hw.rotation);
799 
800 	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
801 		   fb->base.id, format_name.str,
802 		   fb->width, fb->height,
803 		   yesno(plane_state->uapi.visible),
804 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
805 		   DRM_RECT_ARG(&plane_state->uapi.dst),
806 		   rot_str);
807 }
808 
809 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
810 {
811 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
812 	struct intel_plane *plane;
813 
814 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
815 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
816 			   plane->base.base.id, plane->base.name,
817 			   plane_type(plane->base.type));
818 		intel_plane_uapi_info(m, plane);
819 		intel_plane_hw_info(m, plane);
820 	}
821 }
822 
823 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
824 {
825 	const struct intel_crtc_state *crtc_state =
826 		to_intel_crtc_state(crtc->base.state);
827 	int num_scalers = crtc->num_scalers;
828 	int i;
829 
830 	/* Not all platforms have a scaler */
831 	if (num_scalers) {
832 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
833 			   num_scalers,
834 			   crtc_state->scaler_state.scaler_users,
835 			   crtc_state->scaler_state.scaler_id);
836 
837 		for (i = 0; i < num_scalers; i++) {
838 			const struct intel_scaler *sc =
839 				&crtc_state->scaler_state.scalers[i];
840 
841 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
842 				   i, yesno(sc->in_use), sc->mode);
843 		}
844 		seq_puts(m, "\n");
845 	} else {
846 		seq_puts(m, "\tNo scalers available on this platform\n");
847 	}
848 }
849 
850 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
851 {
852 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
853 	const struct intel_crtc_state *crtc_state =
854 		to_intel_crtc_state(crtc->base.state);
855 	struct intel_encoder *encoder;
856 
857 	seq_printf(m, "[CRTC:%d:%s]:\n",
858 		   crtc->base.base.id, crtc->base.name);
859 
860 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
861 		   yesno(crtc_state->uapi.enable),
862 		   yesno(crtc_state->uapi.active),
863 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
864 
865 	if (crtc_state->hw.enable) {
866 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
867 			   yesno(crtc_state->hw.active),
868 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
869 
870 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
871 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
872 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
873 
874 		intel_scaler_info(m, crtc);
875 	}
876 
877 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
878 				    crtc_state->uapi.encoder_mask)
879 		intel_encoder_info(m, crtc, encoder);
880 
881 	intel_plane_info(m, crtc);
882 
883 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
884 		   yesno(!crtc->cpu_fifo_underrun_disabled),
885 		   yesno(!crtc->pch_fifo_underrun_disabled));
886 }
887 
888 static int i915_display_info(struct seq_file *m, void *unused)
889 {
890 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
891 	struct drm_device *dev = &dev_priv->drm;
892 	struct intel_crtc *crtc;
893 	struct drm_connector *connector;
894 	struct drm_connector_list_iter conn_iter;
895 	intel_wakeref_t wakeref;
896 
897 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
898 
899 	drm_modeset_lock_all(dev);
900 
901 	seq_printf(m, "CRTC info\n");
902 	seq_printf(m, "---------\n");
903 	for_each_intel_crtc(dev, crtc)
904 		intel_crtc_info(m, crtc);
905 
906 	seq_printf(m, "\n");
907 	seq_printf(m, "Connector info\n");
908 	seq_printf(m, "--------------\n");
909 	drm_connector_list_iter_begin(dev, &conn_iter);
910 	drm_for_each_connector_iter(connector, &conn_iter)
911 		intel_connector_info(m, connector);
912 	drm_connector_list_iter_end(&conn_iter);
913 
914 	drm_modeset_unlock_all(dev);
915 
916 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
917 
918 	return 0;
919 }
920 
921 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
922 {
923 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
924 	struct drm_device *dev = &dev_priv->drm;
925 	int i;
926 
927 	drm_modeset_lock_all(dev);
928 
929 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
930 		   dev_priv->dpll.ref_clks.nssc,
931 		   dev_priv->dpll.ref_clks.ssc);
932 
933 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
934 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
935 
936 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
937 			   pll->info->id);
938 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
939 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
940 		seq_printf(m, " tracked hardware state:\n");
941 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
942 		seq_printf(m, " dpll_md: 0x%08x\n",
943 			   pll->state.hw_state.dpll_md);
944 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
945 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
946 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
947 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
948 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
949 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
950 			   pll->state.hw_state.mg_refclkin_ctl);
951 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
952 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
953 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
954 			   pll->state.hw_state.mg_clktop2_hsclkctl);
955 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
956 			   pll->state.hw_state.mg_pll_div0);
957 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
958 			   pll->state.hw_state.mg_pll_div1);
959 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
960 			   pll->state.hw_state.mg_pll_lf);
961 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
962 			   pll->state.hw_state.mg_pll_frac_lock);
963 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
964 			   pll->state.hw_state.mg_pll_ssc);
965 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
966 			   pll->state.hw_state.mg_pll_bias);
967 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
968 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
969 	}
970 	drm_modeset_unlock_all(dev);
971 
972 	return 0;
973 }
974 
975 static int i915_ipc_status_show(struct seq_file *m, void *data)
976 {
977 	struct drm_i915_private *dev_priv = m->private;
978 
979 	seq_printf(m, "Isochronous Priority Control: %s\n",
980 			yesno(dev_priv->ipc_enabled));
981 	return 0;
982 }
983 
984 static int i915_ipc_status_open(struct inode *inode, struct file *file)
985 {
986 	struct drm_i915_private *dev_priv = inode->i_private;
987 
988 	if (!HAS_IPC(dev_priv))
989 		return -ENODEV;
990 
991 	return single_open(file, i915_ipc_status_show, dev_priv);
992 }
993 
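/*
 * Writing a boolean enables or disables Isochronous Priority Control and
 * marks the BIOS watermarks as untrusted so they are recomputed on the
 * next commit.
 */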
994 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
995 				     size_t len, loff_t *offp)
996 {
997 	struct seq_file *m = file->private_data;
998 	struct drm_i915_private *dev_priv = m->private;
999 	intel_wakeref_t wakeref;
1000 	bool enable;
1001 	int ret;
1002 
1003 	ret = kstrtobool_from_user(ubuf, len, &enable);
1004 	if (ret < 0)
1005 		return ret;
1006 
1007 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1008 		if (!dev_priv->ipc_enabled && enable)
1009 			drm_info(&dev_priv->drm,
1010 				 "Enabling IPC: WM will be proper only after next commit\n");
1011 		dev_priv->wm.distrust_bios_wm = true;
1012 		dev_priv->ipc_enabled = enable;
1013 		intel_enable_ipc(dev_priv);
1014 	}
1015 
1016 	return len;
1017 }
1018 
1019 static const struct file_operations i915_ipc_status_fops = {
1020 	.owner = THIS_MODULE,
1021 	.open = i915_ipc_status_open,
1022 	.read = seq_read,
1023 	.llseek = seq_lseek,
1024 	.release = single_release,
1025 	.write = i915_ipc_status_write
1026 };
1027 
1028 static int i915_ddb_info(struct seq_file *m, void *unused)
1029 {
1030 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1031 	struct drm_device *dev = &dev_priv->drm;
1032 	struct skl_ddb_entry *entry;
1033 	struct intel_crtc *crtc;
1034 
1035 	if (INTEL_GEN(dev_priv) < 9)
1036 		return -ENODEV;
1037 
1038 	drm_modeset_lock_all(dev);
1039 
1040 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1041 
1042 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1043 		struct intel_crtc_state *crtc_state =
1044 			to_intel_crtc_state(crtc->base.state);
1045 		enum pipe pipe = crtc->pipe;
1046 		enum plane_id plane_id;
1047 
1048 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1049 
1050 		for_each_plane_id_on_crtc(crtc, plane_id) {
1051 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1052 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1053 				   entry->start, entry->end,
1054 				   skl_ddb_entry_size(entry));
1055 		}
1056 
1057 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1058 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1059 			   entry->end, skl_ddb_entry_size(entry));
1060 	}
1061 
1062 	drm_modeset_unlock_all(dev);
1063 
1064 	return 0;
1065 }
1066 
1067 static void drrs_status_per_crtc(struct seq_file *m,
1068 				 struct drm_device *dev,
1069 				 struct intel_crtc *intel_crtc)
1070 {
1071 	struct drm_i915_private *dev_priv = to_i915(dev);
1072 	struct i915_drrs *drrs = &dev_priv->drrs;
1073 	int vrefresh = 0;
1074 	struct drm_connector *connector;
1075 	struct drm_connector_list_iter conn_iter;
1076 
1077 	drm_connector_list_iter_begin(dev, &conn_iter);
1078 	drm_for_each_connector_iter(connector, &conn_iter) {
1079 		bool supported = false;
1080 
1081 		if (connector->state->crtc != &intel_crtc->base)
1082 			continue;
1083 
1084 		seq_printf(m, "%s:\n", connector->name);
1085 
1086 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
1087 		    drrs->type == SEAMLESS_DRRS_SUPPORT)
1088 			supported = true;
1089 
1090 		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
1091 	}
1092 	drm_connector_list_iter_end(&conn_iter);
1093 
1094 	seq_puts(m, "\n");
1095 
1096 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1097 		struct intel_panel *panel;
1098 
1099 		mutex_lock(&drrs->mutex);
1100 		/* DRRS Supported */
1101 		seq_puts(m, "\tDRRS Enabled: Yes\n");
1102 
1103 		/* disable_drrs() will make drrs->dp NULL */
1104 		if (!drrs->dp) {
1105 			seq_puts(m, "Idleness DRRS: Disabled\n");
1106 			if (dev_priv->psr.enabled)
1107 				seq_puts(m,
1108 				"\tAs PSR is enabled, DRRS is not enabled\n");
1109 			mutex_unlock(&drrs->mutex);
1110 			return;
1111 		}
1112 
1113 		panel = &drrs->dp->attached_connector->panel;
1114 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1115 					drrs->busy_frontbuffer_bits);
1116 
1117 		seq_puts(m, "\n\t\t");
1118 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1119 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1120 			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1121 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1122 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1123 			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1124 		} else {
1125 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1126 						drrs->refresh_rate_type);
1127 			mutex_unlock(&drrs->mutex);
1128 			return;
1129 		}
1130 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1131 
1132 		seq_puts(m, "\n\t\t");
1133 		mutex_unlock(&drrs->mutex);
1134 	} else {
1135 		/* DRRS not supported. Print the VBT parameter */
1136 		seq_puts(m, "\tDRRS Enabled : No");
1137 	}
1138 	seq_puts(m, "\n");
1139 }
1140 
1141 static int i915_drrs_status(struct seq_file *m, void *unused)
1142 {
1143 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1144 	struct drm_device *dev = &dev_priv->drm;
1145 	struct intel_crtc *intel_crtc;
1146 	int active_crtc_cnt = 0;
1147 
1148 	drm_modeset_lock_all(dev);
1149 	for_each_intel_crtc(dev, intel_crtc) {
1150 		if (intel_crtc->base.state->active) {
1151 			active_crtc_cnt++;
1152 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1153 
1154 			drrs_status_per_crtc(m, dev, intel_crtc);
1155 		}
1156 	}
1157 	drm_modeset_unlock_all(dev);
1158 
1159 	if (!active_crtc_cnt)
1160 		seq_puts(m, "No active crtc found\n");
1161 
1162 	return 0;
1163 }
1164 
1165 #define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
1166 				seq_puts(m, "LPSP: disabled\n"))
1167 
1168 static bool
1169 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1170 			      enum i915_power_well_id power_well_id)
1171 {
1172 	intel_wakeref_t wakeref;
1173 	bool is_enabled;
1174 
1175 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1176 	is_enabled = intel_display_power_well_is_enabled(i915,
1177 							 power_well_id);
1178 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1179 
1180 	return is_enabled;
1181 }
1182 
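/*
 * Report whether the display is currently in an LPSP (low power single
 * pipe) capable configuration, inferred from the relevant power well being
 * disabled on each platform generation.
 */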
1183 static int i915_lpsp_status(struct seq_file *m, void *unused)
1184 {
1185 	struct drm_i915_private *i915 = node_to_i915(m->private);
1186 
1187 	switch (INTEL_GEN(i915)) {
1188 	case 12:
1189 	case 11:
1190 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1191 		break;
1192 	case 10:
1193 	case 9:
1194 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1195 		break;
1196 	default:
1197 		/*
1198 		 * Apart from HASWELL/BROADWELL, no other legacy platform
1199 		 * supports LPSP.
1200 		 */
1201 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1202 			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1203 		else
1204 			seq_puts(m, "LPSP: not supported\n");
1205 	}
1206 
1207 	return 0;
1208 }
1209 
1210 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1211 {
1212 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1213 	struct drm_device *dev = &dev_priv->drm;
1214 	struct intel_encoder *intel_encoder;
1215 	struct intel_digital_port *dig_port;
1216 	struct drm_connector *connector;
1217 	struct drm_connector_list_iter conn_iter;
1218 
1219 	drm_connector_list_iter_begin(dev, &conn_iter);
1220 	drm_for_each_connector_iter(connector, &conn_iter) {
1221 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1222 			continue;
1223 
1224 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1225 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1226 			continue;
1227 
1228 		dig_port = enc_to_dig_port(intel_encoder);
1229 		if (!dig_port->dp.can_mst)
1230 			continue;
1231 
1232 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1233 			   dig_port->base.base.base.id,
1234 			   dig_port->base.base.name);
1235 		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1236 	}
1237 	drm_connector_list_iter_end(&conn_iter);
1238 
1239 	return 0;
1240 }
1241 
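/*
 * DP compliance test helper: writing "1" marks the compliance test as
 * active on every connected (non-MST) DP connector; any other value clears
 * it.
 */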
1242 static ssize_t i915_displayport_test_active_write(struct file *file,
1243 						  const char __user *ubuf,
1244 						  size_t len, loff_t *offp)
1245 {
1246 	char *input_buffer;
1247 	int status = 0;
1248 	struct drm_device *dev;
1249 	struct drm_connector *connector;
1250 	struct drm_connector_list_iter conn_iter;
1251 	struct intel_dp *intel_dp;
1252 	int val = 0;
1253 
1254 	dev = ((struct seq_file *)file->private_data)->private;
1255 
1256 	if (len == 0)
1257 		return 0;
1258 
1259 	input_buffer = memdup_user_nul(ubuf, len);
1260 	if (IS_ERR(input_buffer))
1261 		return PTR_ERR(input_buffer);
1262 
1263 	drm_dbg(&to_i915(dev)->drm,
1264 		"Copied %d bytes from user\n", (unsigned int)len);
1265 
1266 	drm_connector_list_iter_begin(dev, &conn_iter);
1267 	drm_for_each_connector_iter(connector, &conn_iter) {
1268 		struct intel_encoder *encoder;
1269 
1270 		if (connector->connector_type !=
1271 		    DRM_MODE_CONNECTOR_DisplayPort)
1272 			continue;
1273 
1274 		encoder = to_intel_encoder(connector->encoder);
1275 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1276 			continue;
1277 
1278 		if (encoder && connector->status == connector_status_connected) {
1279 			intel_dp = enc_to_intel_dp(encoder);
1280 			status = kstrtoint(input_buffer, 10, &val);
1281 			if (status < 0)
1282 				break;
1283 			drm_dbg(&to_i915(dev)->drm,
1284 				"Got %d for test active\n", val);
1285 			/* To prevent erroneous activation of the compliance
1286 			 * testing code, only accept an actual value of 1 here
1287 			 */
1288 			if (val == 1)
1289 				intel_dp->compliance.test_active = true;
1290 			else
1291 				intel_dp->compliance.test_active = false;
1292 		}
1293 	}
1294 	drm_connector_list_iter_end(&conn_iter);
1295 	kfree(input_buffer);
1296 	if (status < 0)
1297 		return status;
1298 
1299 	*offp += len;
1300 	return len;
1301 }
1302 
1303 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1304 {
1305 	struct drm_i915_private *dev_priv = m->private;
1306 	struct drm_device *dev = &dev_priv->drm;
1307 	struct drm_connector *connector;
1308 	struct drm_connector_list_iter conn_iter;
1309 	struct intel_dp *intel_dp;
1310 
1311 	drm_connector_list_iter_begin(dev, &conn_iter);
1312 	drm_for_each_connector_iter(connector, &conn_iter) {
1313 		struct intel_encoder *encoder;
1314 
1315 		if (connector->connector_type !=
1316 		    DRM_MODE_CONNECTOR_DisplayPort)
1317 			continue;
1318 
1319 		encoder = to_intel_encoder(connector->encoder);
1320 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1321 			continue;
1322 
1323 		if (encoder && connector->status == connector_status_connected) {
1324 			intel_dp = enc_to_intel_dp(encoder);
1325 			if (intel_dp->compliance.test_active)
1326 				seq_puts(m, "1");
1327 			else
1328 				seq_puts(m, "0");
1329 		} else
1330 			seq_puts(m, "0");
1331 	}
1332 	drm_connector_list_iter_end(&conn_iter);
1333 
1334 	return 0;
1335 }
1336 
1337 static int i915_displayport_test_active_open(struct inode *inode,
1338 					     struct file *file)
1339 {
1340 	return single_open(file, i915_displayport_test_active_show,
1341 			   inode->i_private);
1342 }
1343 
1344 static const struct file_operations i915_displayport_test_active_fops = {
1345 	.owner = THIS_MODULE,
1346 	.open = i915_displayport_test_active_open,
1347 	.read = seq_read,
1348 	.llseek = seq_lseek,
1349 	.release = single_release,
1350 	.write = i915_displayport_test_active_write
1351 };
1352 
1353 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1354 {
1355 	struct drm_i915_private *dev_priv = m->private;
1356 	struct drm_device *dev = &dev_priv->drm;
1357 	struct drm_connector *connector;
1358 	struct drm_connector_list_iter conn_iter;
1359 	struct intel_dp *intel_dp;
1360 
1361 	drm_connector_list_iter_begin(dev, &conn_iter);
1362 	drm_for_each_connector_iter(connector, &conn_iter) {
1363 		struct intel_encoder *encoder;
1364 
1365 		if (connector->connector_type !=
1366 		    DRM_MODE_CONNECTOR_DisplayPort)
1367 			continue;
1368 
1369 		encoder = to_intel_encoder(connector->encoder);
1370 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1371 			continue;
1372 
1373 		if (encoder && connector->status == connector_status_connected) {
1374 			intel_dp = enc_to_intel_dp(encoder);
1375 			if (intel_dp->compliance.test_type ==
1376 			    DP_TEST_LINK_EDID_READ)
1377 				seq_printf(m, "%lx",
1378 					   intel_dp->compliance.test_data.edid);
1379 			else if (intel_dp->compliance.test_type ==
1380 				 DP_TEST_LINK_VIDEO_PATTERN) {
1381 				seq_printf(m, "hdisplay: %d\n",
1382 					   intel_dp->compliance.test_data.hdisplay);
1383 				seq_printf(m, "vdisplay: %d\n",
1384 					   intel_dp->compliance.test_data.vdisplay);
1385 				seq_printf(m, "bpc: %u\n",
1386 					   intel_dp->compliance.test_data.bpc);
1387 			} else if (intel_dp->compliance.test_type ==
1388 				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1389 				seq_printf(m, "pattern: %d\n",
1390 					   intel_dp->compliance.test_data.phytest.phy_pattern);
1391 				seq_printf(m, "Number of lanes: %d\n",
1392 					   intel_dp->compliance.test_data.phytest.num_lanes);
1393 				seq_printf(m, "Link Rate: %d\n",
1394 					   intel_dp->compliance.test_data.phytest.link_rate);
1395 				seq_printf(m, "level: %02x\n",
1396 					   intel_dp->train_set[0]);
1397 			}
1398 		} else
1399 			seq_puts(m, "0");
1400 	}
1401 	drm_connector_list_iter_end(&conn_iter);
1402 
1403 	return 0;
1404 }
1405 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1406 
1407 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1408 {
1409 	struct drm_i915_private *dev_priv = m->private;
1410 	struct drm_device *dev = &dev_priv->drm;
1411 	struct drm_connector *connector;
1412 	struct drm_connector_list_iter conn_iter;
1413 	struct intel_dp *intel_dp;
1414 
1415 	drm_connector_list_iter_begin(dev, &conn_iter);
1416 	drm_for_each_connector_iter(connector, &conn_iter) {
1417 		struct intel_encoder *encoder;
1418 
1419 		if (connector->connector_type !=
1420 		    DRM_MODE_CONNECTOR_DisplayPort)
1421 			continue;
1422 
1423 		encoder = to_intel_encoder(connector->encoder);
1424 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1425 			continue;
1426 
1427 		if (encoder && connector->status == connector_status_connected) {
1428 			intel_dp = enc_to_intel_dp(encoder);
1429 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1430 		} else
1431 			seq_puts(m, "0");
1432 	}
1433 	drm_connector_list_iter_end(&conn_iter);
1434 
1435 	return 0;
1436 }
1437 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1438 
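/*
 * Print one line per watermark level. Raw values are converted to
 * microseconds for display: gen9+/VLV/CHV/G4X store latencies in 1 us
 * units, older platforms use 0.5 us units for WM1 and up.
 */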
1439 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1440 {
1441 	struct drm_i915_private *dev_priv = m->private;
1442 	struct drm_device *dev = &dev_priv->drm;
1443 	int level;
1444 	int num_levels;
1445 
1446 	if (IS_CHERRYVIEW(dev_priv))
1447 		num_levels = 3;
1448 	else if (IS_VALLEYVIEW(dev_priv))
1449 		num_levels = 1;
1450 	else if (IS_G4X(dev_priv))
1451 		num_levels = 3;
1452 	else
1453 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1454 
1455 	drm_modeset_lock_all(dev);
1456 
1457 	for (level = 0; level < num_levels; level++) {
1458 		unsigned int latency = wm[level];
1459 
1460 		/*
1461 		 * - WM1+ latency values in 0.5us units
1462 		 * - latencies are in us on gen9/vlv/chv
1463 		 */
1464 		if (INTEL_GEN(dev_priv) >= 9 ||
1465 		    IS_VALLEYVIEW(dev_priv) ||
1466 		    IS_CHERRYVIEW(dev_priv) ||
1467 		    IS_G4X(dev_priv))
1468 			latency *= 10;
1469 		else if (level > 0)
1470 			latency *= 5;
1471 
1472 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1473 			   level, wm[level], latency / 10, latency % 10);
1474 	}
1475 
1476 	drm_modeset_unlock_all(dev);
1477 }
1478 
1479 static int pri_wm_latency_show(struct seq_file *m, void *data)
1480 {
1481 	struct drm_i915_private *dev_priv = m->private;
1482 	const u16 *latencies;
1483 
1484 	if (INTEL_GEN(dev_priv) >= 9)
1485 		latencies = dev_priv->wm.skl_latency;
1486 	else
1487 		latencies = dev_priv->wm.pri_latency;
1488 
1489 	wm_latency_show(m, latencies);
1490 
1491 	return 0;
1492 }
1493 
1494 static int spr_wm_latency_show(struct seq_file *m, void *data)
1495 {
1496 	struct drm_i915_private *dev_priv = m->private;
1497 	const u16 *latencies;
1498 
1499 	if (INTEL_GEN(dev_priv) >= 9)
1500 		latencies = dev_priv->wm.skl_latency;
1501 	else
1502 		latencies = dev_priv->wm.spr_latency;
1503 
1504 	wm_latency_show(m, latencies);
1505 
1506 	return 0;
1507 }
1508 
1509 static int cur_wm_latency_show(struct seq_file *m, void *data)
1510 {
1511 	struct drm_i915_private *dev_priv = m->private;
1512 	const u16 *latencies;
1513 
1514 	if (INTEL_GEN(dev_priv) >= 9)
1515 		latencies = dev_priv->wm.skl_latency;
1516 	else
1517 		latencies = dev_priv->wm.cur_latency;
1518 
1519 	wm_latency_show(m, latencies);
1520 
1521 	return 0;
1522 }
1523 
1524 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1525 {
1526 	struct drm_i915_private *dev_priv = inode->i_private;
1527 
1528 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
1529 		return -ENODEV;
1530 
1531 	return single_open(file, pri_wm_latency_show, dev_priv);
1532 }
1533 
1534 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1535 {
1536 	struct drm_i915_private *dev_priv = inode->i_private;
1537 
1538 	if (HAS_GMCH(dev_priv))
1539 		return -ENODEV;
1540 
1541 	return single_open(file, spr_wm_latency_show, dev_priv);
1542 }
1543 
1544 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1545 {
1546 	struct drm_i915_private *dev_priv = inode->i_private;
1547 
1548 	if (HAS_GMCH(dev_priv))
1549 		return -ENODEV;
1550 
1551 	return single_open(file, cur_wm_latency_show, dev_priv);
1552 }
1553 
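/*
 * Parse a new set of watermark latencies from userspace: expects exactly as
 * many space-separated decimal values as the platform has watermark levels,
 * e.g. on a gen9+ part with eight levels (file name and values are only an
 * illustration):
 *   echo "2 4 8 16 32 48 64 96" > i915_pri_wm_latency
 */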
1554 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1555 				size_t len, loff_t *offp, u16 wm[8])
1556 {
1557 	struct seq_file *m = file->private_data;
1558 	struct drm_i915_private *dev_priv = m->private;
1559 	struct drm_device *dev = &dev_priv->drm;
1560 	u16 new[8] = { 0 };
1561 	int num_levels;
1562 	int level;
1563 	int ret;
1564 	char tmp[32];
1565 
1566 	if (IS_CHERRYVIEW(dev_priv))
1567 		num_levels = 3;
1568 	else if (IS_VALLEYVIEW(dev_priv))
1569 		num_levels = 1;
1570 	else if (IS_G4X(dev_priv))
1571 		num_levels = 3;
1572 	else
1573 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1574 
1575 	if (len >= sizeof(tmp))
1576 		return -EINVAL;
1577 
1578 	if (copy_from_user(tmp, ubuf, len))
1579 		return -EFAULT;
1580 
1581 	tmp[len] = '\0';
1582 
1583 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1584 		     &new[0], &new[1], &new[2], &new[3],
1585 		     &new[4], &new[5], &new[6], &new[7]);
1586 	if (ret != num_levels)
1587 		return -EINVAL;
1588 
1589 	drm_modeset_lock_all(dev);
1590 
1591 	for (level = 0; level < num_levels; level++)
1592 		wm[level] = new[level];
1593 
1594 	drm_modeset_unlock_all(dev);
1595 
1596 	return len;
1597 }
1598 
1599 
1600 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1601 				    size_t len, loff_t *offp)
1602 {
1603 	struct seq_file *m = file->private_data;
1604 	struct drm_i915_private *dev_priv = m->private;
1605 	u16 *latencies;
1606 
1607 	if (INTEL_GEN(dev_priv) >= 9)
1608 		latencies = dev_priv->wm.skl_latency;
1609 	else
1610 		latencies = dev_priv->wm.pri_latency;
1611 
1612 	return wm_latency_write(file, ubuf, len, offp, latencies);
1613 }
1614 
1615 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1616 				    size_t len, loff_t *offp)
1617 {
1618 	struct seq_file *m = file->private_data;
1619 	struct drm_i915_private *dev_priv = m->private;
1620 	u16 *latencies;
1621 
1622 	if (INTEL_GEN(dev_priv) >= 9)
1623 		latencies = dev_priv->wm.skl_latency;
1624 	else
1625 		latencies = dev_priv->wm.spr_latency;
1626 
1627 	return wm_latency_write(file, ubuf, len, offp, latencies);
1628 }
1629 
1630 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1631 				    size_t len, loff_t *offp)
1632 {
1633 	struct seq_file *m = file->private_data;
1634 	struct drm_i915_private *dev_priv = m->private;
1635 	u16 *latencies;
1636 
1637 	if (INTEL_GEN(dev_priv) >= 9)
1638 		latencies = dev_priv->wm.skl_latency;
1639 	else
1640 		latencies = dev_priv->wm.cur_latency;
1641 
1642 	return wm_latency_write(file, ubuf, len, offp, latencies);
1643 }
1644 
1645 static const struct file_operations i915_pri_wm_latency_fops = {
1646 	.owner = THIS_MODULE,
1647 	.open = pri_wm_latency_open,
1648 	.read = seq_read,
1649 	.llseek = seq_lseek,
1650 	.release = single_release,
1651 	.write = pri_wm_latency_write
1652 };
1653 
1654 static const struct file_operations i915_spr_wm_latency_fops = {
1655 	.owner = THIS_MODULE,
1656 	.open = spr_wm_latency_open,
1657 	.read = seq_read,
1658 	.llseek = seq_lseek,
1659 	.release = single_release,
1660 	.write = spr_wm_latency_write
1661 };
1662 
1663 static const struct file_operations i915_cur_wm_latency_fops = {
1664 	.owner = THIS_MODULE,
1665 	.open = cur_wm_latency_open,
1666 	.read = seq_read,
1667 	.llseek = seq_lseek,
1668 	.release = single_release,
1669 	.write = cur_wm_latency_write
1670 };
1671 
1672 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1673 {
1674 	struct drm_i915_private *dev_priv = m->private;
1675 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1676 
1677 	/* Synchronize with everything first in case there's been an HPD
1678 	 * storm, but we haven't finished handling it in the kernel yet
1679 	 */
1680 	intel_synchronize_irq(dev_priv);
1681 	flush_work(&dev_priv->hotplug.dig_port_work);
1682 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1683 
1684 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1685 	seq_printf(m, "Detected: %s\n",
1686 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1687 
1688 	return 0;
1689 }
1690 
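/*
 * Update the HPD storm detection threshold: accepts a decimal number of
 * hotplug events (0 disables detection) or the string "reset" to restore
 * the default. Example (debugfs path assumed):
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */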
1691 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1692 					const char __user *ubuf, size_t len,
1693 					loff_t *offp)
1694 {
1695 	struct seq_file *m = file->private_data;
1696 	struct drm_i915_private *dev_priv = m->private;
1697 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1698 	unsigned int new_threshold;
1699 	int i;
1700 	char *newline;
1701 	char tmp[16];
1702 
1703 	if (len >= sizeof(tmp))
1704 		return -EINVAL;
1705 
1706 	if (copy_from_user(tmp, ubuf, len))
1707 		return -EFAULT;
1708 
1709 	tmp[len] = '\0';
1710 
1711 	/* Strip newline, if any */
1712 	newline = strchr(tmp, '\n');
1713 	if (newline)
1714 		*newline = '\0';
1715 
1716 	if (strcmp(tmp, "reset") == 0)
1717 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1718 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1719 		return -EINVAL;
1720 
1721 	if (new_threshold > 0)
1722 		drm_dbg_kms(&dev_priv->drm,
1723 			    "Setting HPD storm detection threshold to %d\n",
1724 			    new_threshold);
1725 	else
1726 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1727 
1728 	spin_lock_irq(&dev_priv->irq_lock);
1729 	hotplug->hpd_storm_threshold = new_threshold;
1730 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1731 	for_each_hpd_pin(i)
1732 		hotplug->stats[i].count = 0;
1733 	spin_unlock_irq(&dev_priv->irq_lock);
1734 
1735 	/* Re-enable hpd immediately if we were in an irq storm */
1736 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1737 
1738 	return len;
1739 }
1740 
1741 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1742 {
1743 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1744 }
1745 
1746 static const struct file_operations i915_hpd_storm_ctl_fops = {
1747 	.owner = THIS_MODULE,
1748 	.open = i915_hpd_storm_ctl_open,
1749 	.read = seq_read,
1750 	.llseek = seq_lseek,
1751 	.release = single_release,
1752 	.write = i915_hpd_storm_ctl_write
1753 };
1754 
1755 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1756 {
1757 	struct drm_i915_private *dev_priv = m->private;
1758 
1759 	seq_printf(m, "Enabled: %s\n",
1760 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1761 
1762 	return 0;
1763 }
1764 
1765 static int
1766 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1767 {
1768 	return single_open(file, i915_hpd_short_storm_ctl_show,
1769 			   inode->i_private);
1770 }
1771 
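/*
 * Enable or disable HPD short-pulse storm detection: accepts a boolean, or
 * "reset" to restore the platform default (enabled only when the hardware
 * lacks DP MST support).
 */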
1772 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1773 					      const char __user *ubuf,
1774 					      size_t len, loff_t *offp)
1775 {
1776 	struct seq_file *m = file->private_data;
1777 	struct drm_i915_private *dev_priv = m->private;
1778 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1779 	char *newline;
1780 	char tmp[16];
1781 	int i;
1782 	bool new_state;
1783 
1784 	if (len >= sizeof(tmp))
1785 		return -EINVAL;
1786 
1787 	if (copy_from_user(tmp, ubuf, len))
1788 		return -EFAULT;
1789 
1790 	tmp[len] = '\0';
1791 
1792 	/* Strip newline, if any */
1793 	newline = strchr(tmp, '\n');
1794 	if (newline)
1795 		*newline = '\0';
1796 
1797 	/* Reset to the "default" state for this system */
1798 	if (strcmp(tmp, "reset") == 0)
1799 		new_state = !HAS_DP_MST(dev_priv);
1800 	else if (kstrtobool(tmp, &new_state) != 0)
1801 		return -EINVAL;
1802 
1803 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1804 		    new_state ? "En" : "Dis");
1805 
1806 	spin_lock_irq(&dev_priv->irq_lock);
1807 	hotplug->hpd_short_storm_enabled = new_state;
1808 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1809 	for_each_hpd_pin(i)
1810 		hotplug->stats[i].count = 0;
1811 	spin_unlock_irq(&dev_priv->irq_lock);
1812 
	/* Re-enable HPD immediately if we were in an IRQ storm */
1814 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1815 
1816 	return len;
1817 }
1818 
1819 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1820 	.owner = THIS_MODULE,
1821 	.open = i915_hpd_short_storm_ctl_open,
1822 	.read = seq_read,
1823 	.llseek = seq_lseek,
1824 	.release = single_release,
1825 	.write = i915_hpd_short_storm_ctl_write,
1826 };
1827 
1828 static int i915_drrs_ctl_set(void *data, u64 val)
1829 {
1830 	struct drm_i915_private *dev_priv = data;
1831 	struct drm_device *dev = &dev_priv->drm;
1832 	struct intel_crtc *crtc;
1833 
1834 	if (INTEL_GEN(dev_priv) < 7)
1835 		return -ENODEV;
1836 
1837 	for_each_intel_crtc(dev, crtc) {
1838 		struct drm_connector_list_iter conn_iter;
1839 		struct intel_crtc_state *crtc_state;
1840 		struct drm_connector *connector;
1841 		struct drm_crtc_commit *commit;
1842 		int ret;
1843 
1844 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
1845 		if (ret)
1846 			return ret;
1847 
1848 		crtc_state = to_intel_crtc_state(crtc->base.state);
1849 
		if (!crtc_state->hw.active || !crtc_state->has_drrs)
1852 			goto out;
1853 
1854 		commit = crtc_state->uapi.commit;
1855 		if (commit) {
1856 			ret = wait_for_completion_interruptible(&commit->hw_done);
1857 			if (ret)
1858 				goto out;
1859 		}
1860 
1861 		drm_connector_list_iter_begin(dev, &conn_iter);
1862 		drm_for_each_connector_iter(connector, &conn_iter) {
1863 			struct intel_encoder *encoder;
1864 			struct intel_dp *intel_dp;
1865 
1866 			if (!(crtc_state->uapi.connector_mask &
1867 			      drm_connector_mask(connector)))
1868 				continue;
1869 
1870 			encoder = intel_attached_encoder(to_intel_connector(connector));
1871 			if (encoder->type != INTEL_OUTPUT_EDP)
1872 				continue;
1873 
1874 			drm_dbg(&dev_priv->drm,
1875 				"Manually %sabling DRRS. %llu\n",
1876 				val ? "en" : "dis", val);
1877 
1878 			intel_dp = enc_to_intel_dp(encoder);
			if (val)
				intel_edp_drrs_enable(intel_dp, crtc_state);
			else
				intel_edp_drrs_disable(intel_dp, crtc_state);
1885 		}
1886 		drm_connector_list_iter_end(&conn_iter);
1887 
1888 out:
1889 		drm_modeset_unlock(&crtc->base.mutex);
1890 		if (ret)
1891 			return ret;
1892 	}
1893 
1894 	return 0;
1895 }
1896 
1897 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
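
/*
 * Usage sketch (illustrative only): i915_drrs_ctl takes an integer; any
 * non-zero value enables DRRS on eDP outputs whose active CRTC supports
 * DRRS, zero disables it. Path assumes DRM minor 0:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 *   echo 0 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */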
1898 
1899 static ssize_t
1900 i915_fifo_underrun_reset_write(struct file *filp,
1901 			       const char __user *ubuf,
1902 			       size_t cnt, loff_t *ppos)
1903 {
1904 	struct drm_i915_private *dev_priv = filp->private_data;
1905 	struct intel_crtc *intel_crtc;
1906 	struct drm_device *dev = &dev_priv->drm;
1907 	int ret;
1908 	bool reset;
1909 
1910 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
1911 	if (ret)
1912 		return ret;
1913 
1914 	if (!reset)
1915 		return cnt;
1916 
1917 	for_each_intel_crtc(dev, intel_crtc) {
1918 		struct drm_crtc_commit *commit;
1919 		struct intel_crtc_state *crtc_state;
1920 
1921 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
1922 		if (ret)
1923 			return ret;
1924 
1925 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
1926 		commit = crtc_state->uapi.commit;
1927 		if (commit) {
1928 			ret = wait_for_completion_interruptible(&commit->hw_done);
1929 			if (!ret)
1930 				ret = wait_for_completion_interruptible(&commit->flip_done);
1931 		}
1932 
1933 		if (!ret && crtc_state->hw.active) {
1934 			drm_dbg_kms(&dev_priv->drm,
1935 				    "Re-arming FIFO underruns on pipe %c\n",
1936 				    pipe_name(intel_crtc->pipe));
1937 
1938 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
1939 		}
1940 
1941 		drm_modeset_unlock(&intel_crtc->base.mutex);
1942 
1943 		if (ret)
1944 			return ret;
1945 	}
1946 
1947 	ret = intel_fbc_reset_underrun(dev_priv);
1948 	if (ret)
1949 		return ret;
1950 
1951 	return cnt;
1952 }
1953 
1954 static const struct file_operations i915_fifo_underrun_reset_ops = {
1955 	.owner = THIS_MODULE,
1956 	.open = simple_open,
1957 	.write = i915_fifo_underrun_reset_write,
1958 	.llseek = default_llseek,
1959 };
1960 
1961 static const struct drm_info_list intel_display_debugfs_list[] = {
1962 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
1963 	{"i915_fbc_status", i915_fbc_status, 0},
1964 	{"i915_ips_status", i915_ips_status, 0},
1965 	{"i915_sr_status", i915_sr_status, 0},
1966 	{"i915_opregion", i915_opregion, 0},
1967 	{"i915_vbt", i915_vbt, 0},
1968 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1969 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
1970 	{"i915_power_domain_info", i915_power_domain_info, 0},
1971 	{"i915_dmc_info", i915_dmc_info, 0},
1972 	{"i915_display_info", i915_display_info, 0},
1973 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
1974 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
1975 	{"i915_ddb_info", i915_ddb_info, 0},
1976 	{"i915_drrs_status", i915_drrs_status, 0},
1977 	{"i915_lpsp_status", i915_lpsp_status, 0},
1978 };
1979 
1980 static const struct {
1981 	const char *name;
1982 	const struct file_operations *fops;
1983 } intel_display_debugfs_files[] = {
1984 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
1985 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
1986 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
1987 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
1988 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
1989 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
1990 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
1991 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
1992 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
1993 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
1994 	{"i915_ipc_status", &i915_ipc_status_fops},
1995 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
1996 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
1997 };
1998 
1999 void intel_display_debugfs_register(struct drm_i915_private *i915)
2000 {
2001 	struct drm_minor *minor = i915->drm.primary;
2002 	int i;
2003 
2004 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
2005 		debugfs_create_file(intel_display_debugfs_files[i].name,
2006 				    S_IRUGO | S_IWUSR,
2007 				    minor->debugfs_root,
2008 				    to_i915(minor->dev),
2009 				    intel_display_debugfs_files[i].fops);
2010 	}
2011 
2012 	drm_debugfs_create_files(intel_display_debugfs_list,
2013 				 ARRAY_SIZE(intel_display_debugfs_list),
2014 				 minor->debugfs_root, minor);
2015 }
2016 
2017 static int i915_panel_show(struct seq_file *m, void *data)
2018 {
2019 	struct drm_connector *connector = m->private;
2020 	struct intel_dp *intel_dp =
2021 		intel_attached_dp(to_intel_connector(connector));
2022 
2023 	if (connector->status != connector_status_connected)
2024 		return -ENODEV;
2025 
2026 	seq_printf(m, "Panel power up delay: %d\n",
2027 		   intel_dp->panel_power_up_delay);
2028 	seq_printf(m, "Panel power down delay: %d\n",
2029 		   intel_dp->panel_power_down_delay);
2030 	seq_printf(m, "Backlight on delay: %d\n",
2031 		   intel_dp->backlight_on_delay);
2032 	seq_printf(m, "Backlight off delay: %d\n",
2033 		   intel_dp->backlight_off_delay);
2034 
2035 	return 0;
2036 }
2037 DEFINE_SHOW_ATTRIBUTE(i915_panel);
2038 
2039 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2040 {
2041 	struct drm_connector *connector = m->private;
2042 	struct intel_connector *intel_connector = to_intel_connector(connector);
2043 
2044 	if (connector->status != connector_status_connected)
2045 		return -ENODEV;
2046 
2047 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2048 		   connector->base.id);
2049 	intel_hdcp_info(m, intel_connector);
2050 
2051 	return 0;
2052 }
2053 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
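
/*
 * Usage sketch (illustrative only): the files below are created per
 * connector by intel_connector_debugfs_add(), so they live under the
 * connector's debugfs directory rather than the top-level DRM minor.
 * Connector names depend on the system, e.g.:
 *
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_hdcp_sink_capability
 *   cat /sys/kernel/debug/dri/0/eDP-1/i915_panel_timings
 */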
2054 
#define LPSP_CAPABLE(COND) ((COND) ? seq_puts(m, "LPSP: capable\n") : \
				seq_puts(m, "LPSP: incapable\n"))
2057 
2058 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2059 {
2060 	struct drm_connector *connector = m->private;
2061 	struct drm_i915_private *i915 = to_i915(connector->dev);
2062 	struct intel_encoder *encoder;
2063 
2064 	encoder = intel_attached_encoder(to_intel_connector(connector));
2065 	if (!encoder)
2066 		return -ENODEV;
2067 
2068 	if (connector->status != connector_status_connected)
2069 		return -ENODEV;
2070 
2071 	switch (INTEL_GEN(i915)) {
2072 	case 12:
2073 		/*
2074 		 * Actually TGL can drive LPSP on port till DDI_C
2075 		 * but there is no physical connected DDI_C on TGL sku's,
2076 		 * even driver is not initilizing DDI_C port for gen12.
2077 		 */
2078 		LPSP_CAPABLE(encoder->port <= PORT_B);
2079 		break;
2080 	case 11:
2081 		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2082 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2083 		break;
2084 	case 10:
2085 	case 9:
2086 		LPSP_CAPABLE(encoder->port == PORT_A &&
2087 			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2088 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
2089 			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2090 		break;
2091 	default:
2092 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2093 			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2094 	}
2095 
2096 	return 0;
2097 }
2098 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2099 
2100 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2101 {
2102 	struct drm_connector *connector = m->private;
2103 	struct drm_device *dev = connector->dev;
2104 	struct drm_crtc *crtc;
2105 	struct intel_dp *intel_dp;
2106 	struct drm_modeset_acquire_ctx ctx;
2107 	struct intel_crtc_state *crtc_state = NULL;
2108 	int ret = 0;
2109 	bool try_again = false;
2110 
2111 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2112 
2113 	do {
2114 		try_again = false;
2115 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2116 				       &ctx);
2117 		if (ret) {
2118 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2119 				try_again = true;
2120 				continue;
2121 			}
2122 			break;
2123 		}
2124 		crtc = connector->state->crtc;
2125 		if (connector->status != connector_status_connected || !crtc) {
2126 			ret = -ENODEV;
2127 			break;
2128 		}
2129 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2130 		if (ret == -EDEADLK) {
2131 			ret = drm_modeset_backoff(&ctx);
2132 			if (!ret) {
2133 				try_again = true;
2134 				continue;
2135 			}
2136 			break;
2137 		} else if (ret) {
2138 			break;
2139 		}
2140 		intel_dp = intel_attached_dp(to_intel_connector(connector));
2141 		crtc_state = to_intel_crtc_state(crtc->state);
2142 		seq_printf(m, "DSC_Enabled: %s\n",
2143 			   yesno(crtc_state->dsc.compression_enable));
2144 		seq_printf(m, "DSC_Sink_Support: %s\n",
2145 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2146 		seq_printf(m, "Force_DSC_Enable: %s\n",
2147 			   yesno(intel_dp->force_dsc_en));
2148 		if (!intel_dp_is_edp(intel_dp))
2149 			seq_printf(m, "FEC_Sink_Support: %s\n",
2150 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2151 	} while (try_again);
2152 
2153 	drm_modeset_drop_locks(&ctx);
2154 	drm_modeset_acquire_fini(&ctx);
2155 
2156 	return ret;
2157 }
2158 
2159 static ssize_t i915_dsc_fec_support_write(struct file *file,
2160 					  const char __user *ubuf,
2161 					  size_t len, loff_t *offp)
2162 {
2163 	bool dsc_enable = false;
2164 	int ret;
2165 	struct drm_connector *connector =
2166 		((struct seq_file *)file->private_data)->private;
2167 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2168 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2169 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2170 
2171 	if (len == 0)
2172 		return 0;
2173 
2174 	drm_dbg(&i915->drm,
2175 		"Copied %zu bytes from user to force DSC\n", len);
2176 
2177 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2178 	if (ret < 0)
2179 		return ret;
2180 
2181 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2182 		(dsc_enable) ? "true" : "false");
2183 	intel_dp->force_dsc_en = dsc_enable;
2184 
2185 	*offp += len;
2186 	return len;
2187 }
2188 
2189 static int i915_dsc_fec_support_open(struct inode *inode,
2190 				     struct file *file)
2191 {
2192 	return single_open(file, i915_dsc_fec_support_show,
2193 			   inode->i_private);
2194 }
2195 
2196 static const struct file_operations i915_dsc_fec_support_fops = {
2197 	.owner = THIS_MODULE,
2198 	.open = i915_dsc_fec_support_open,
2199 	.read = seq_read,
2200 	.llseek = seq_lseek,
2201 	.release = single_release,
	.write = i915_dsc_fec_support_write,
2203 };
2204 
2205 /**
2206  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2207  * @connector: pointer to a registered drm_connector
2208  *
2209  * Cleanup will be done by drm_connector_unregister() through a call to
2210  * drm_debugfs_connector_remove().
2211  *
 * Returns 0 on success, a negative error code on failure.
2213  */
2214 int intel_connector_debugfs_add(struct drm_connector *connector)
2215 {
2216 	struct dentry *root = connector->debugfs_entry;
2217 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2218 
	/* The connector must have been registered beforehand. */
2220 	if (!root)
2221 		return -ENODEV;
2222 
2223 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2224 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
2225 				    connector, &i915_panel_fops);
2226 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
2227 				    connector, &i915_psr_sink_status_fops);
2228 	}
2229 
2230 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2231 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2232 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2233 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
2234 				    connector, &i915_hdcp_sink_capability_fops);
2235 	}
2236 
2237 	if (INTEL_GEN(dev_priv) >= 10 &&
2238 	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
2239 	      !to_intel_connector(connector)->mst_port) ||
2240 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2241 		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
2242 				    connector, &i915_dsc_fec_support_fops);
2243 
	/* Legacy panels don't support LPSP on any platform */
2245 	if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2246 	     IS_BROADWELL(dev_priv)) &&
2247 	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2248 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2249 	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2250 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2251 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2252 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2253 				    connector, &i915_lpsp_capability_fops);
2254 
2255 	return 0;
2256 }
2257