1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_display_types.h"
14 #include "intel_dp.h"
15 #include "intel_fbc.h"
16 #include "intel_hdcp.h"
17 #include "intel_hdmi.h"
18 #include "intel_pm.h"
19 #include "intel_psr.h"
20 #include "intel_sideband.h"
21 
22 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
23 {
24 	return to_i915(node->minor->dev);
25 }
26 
27 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
28 {
29 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
30 
31 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
32 		   dev_priv->fb_tracking.busy_bits);
33 
34 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
35 		   dev_priv->fb_tracking.flip_bits);
36 
37 	return 0;
38 }
39 
40 static int i915_fbc_status(struct seq_file *m, void *unused)
41 {
42 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 	struct intel_fbc *fbc = &dev_priv->fbc;
44 	intel_wakeref_t wakeref;
45 
46 	if (!HAS_FBC(dev_priv))
47 		return -ENODEV;
48 
49 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
50 	mutex_lock(&fbc->lock);
51 
52 	if (intel_fbc_is_active(dev_priv))
53 		seq_puts(m, "FBC enabled\n");
54 	else
55 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
56 
57 	if (intel_fbc_is_active(dev_priv)) {
58 		u32 mask;
59 
60 		if (INTEL_GEN(dev_priv) >= 8)
61 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
62 		else if (INTEL_GEN(dev_priv) >= 7)
63 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
64 		else if (INTEL_GEN(dev_priv) >= 5)
65 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
66 		else if (IS_G4X(dev_priv))
67 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
68 		else
69 			mask = intel_de_read(dev_priv, FBC_STATUS) &
70 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
71 
72 		seq_printf(m, "Compressing: %s\n", yesno(mask));
73 	}
74 
75 	mutex_unlock(&fbc->lock);
76 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
77 
78 	return 0;
79 }
80 
81 static int i915_fbc_false_color_get(void *data, u64 *val)
82 {
83 	struct drm_i915_private *dev_priv = data;
84 
85 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
86 		return -ENODEV;
87 
88 	*val = dev_priv->fbc.false_color;
89 
90 	return 0;
91 }
92 
93 static int i915_fbc_false_color_set(void *data, u64 val)
94 {
95 	struct drm_i915_private *dev_priv = data;
96 	u32 reg;
97 
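	/* FBC false color is a debug feature available on IVB (gen7) and newer */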
98 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
99 		return -ENODEV;
100 
101 	mutex_lock(&dev_priv->fbc.lock);
102 
103 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
104 	dev_priv->fbc.false_color = val;
105 
106 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
107 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
108 
109 	mutex_unlock(&dev_priv->fbc.lock);
110 	return 0;
111 }
112 
113 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
114 			i915_fbc_false_color_get, i915_fbc_false_color_set,
115 			"%llu\n");
116 
117 static int i915_ips_status(struct seq_file *m, void *unused)
118 {
119 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
120 	intel_wakeref_t wakeref;
121 
122 	if (!HAS_IPS(dev_priv))
123 		return -ENODEV;
124 
125 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
126 
127 	seq_printf(m, "Enabled by kernel parameter: %s\n",
128 		   yesno(dev_priv->params.enable_ips));
129 
130 	if (INTEL_GEN(dev_priv) >= 8) {
131 		seq_puts(m, "Currently: unknown\n");
132 	} else {
133 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
134 			seq_puts(m, "Currently: enabled\n");
135 		else
136 			seq_puts(m, "Currently: disabled\n");
137 	}
138 
139 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
140 
141 	return 0;
142 }
143 
144 static int i915_sr_status(struct seq_file *m, void *unused)
145 {
146 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
147 	intel_wakeref_t wakeref;
148 	bool sr_enabled = false;
149 
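	/*
	 * POWER_DOMAIN_INIT keeps every display power well enabled while the
	 * platform-specific self-refresh registers are read below.
	 */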
150 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
151 
152 	if (INTEL_GEN(dev_priv) >= 9)
153 		/* no global SR status; inspect per-plane WM */;
154 	else if (HAS_PCH_SPLIT(dev_priv))
155 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
156 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
157 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
158 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
159 	else if (IS_I915GM(dev_priv))
160 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
161 	else if (IS_PINEVIEW(dev_priv))
162 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
163 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
164 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
165 
166 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
167 
168 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
169 
170 	return 0;
171 }
172 
173 static int i915_opregion(struct seq_file *m, void *unused)
174 {
175 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
176 
177 	if (opregion->header)
178 		seq_write(m, opregion->header, OPREGION_SIZE);
179 
180 	return 0;
181 }
182 
183 static int i915_vbt(struct seq_file *m, void *unused)
184 {
185 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
186 
187 	if (opregion->vbt)
188 		seq_write(m, opregion->vbt, opregion->vbt_size);
189 
190 	return 0;
191 }
192 
193 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
194 {
195 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
196 	struct drm_device *dev = &dev_priv->drm;
197 	struct intel_framebuffer *fbdev_fb = NULL;
198 	struct drm_framebuffer *drm_fb;
199 
200 #ifdef CONFIG_DRM_FBDEV_EMULATION
201 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
202 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
203 
204 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
205 			   fbdev_fb->base.width,
206 			   fbdev_fb->base.height,
207 			   fbdev_fb->base.format->depth,
208 			   fbdev_fb->base.format->cpp[0] * 8,
209 			   fbdev_fb->base.modifier,
210 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
211 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
212 		seq_putc(m, '\n');
213 	}
214 #endif
215 
216 	mutex_lock(&dev->mode_config.fb_lock);
217 	drm_for_each_fb(drm_fb, dev) {
218 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
219 		if (fb == fbdev_fb)
220 			continue;
221 
222 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
223 			   fb->base.width,
224 			   fb->base.height,
225 			   fb->base.format->depth,
226 			   fb->base.format->cpp[0] * 8,
227 			   fb->base.modifier,
228 			   drm_framebuffer_read_refcount(&fb->base));
229 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
230 		seq_putc(m, '\n');
231 	}
232 	mutex_unlock(&dev->mode_config.fb_lock);
233 
234 	return 0;
235 }
236 
237 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
238 {
239 	u8 val;
240 	static const char * const sink_status[] = {
241 		"inactive",
242 		"transition to active, capture and display",
243 		"active, display from RFB",
244 		"active, capture and display on sink device timings",
245 		"transition to inactive, capture and display, timing re-sync",
246 		"reserved",
247 		"reserved",
248 		"sink internal error",
249 	};
250 	struct drm_connector *connector = m->private;
251 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
252 	struct intel_dp *intel_dp =
253 		intel_attached_dp(to_intel_connector(connector));
254 	int ret;
255 
256 	if (!CAN_PSR(dev_priv)) {
257 		seq_puts(m, "PSR Unsupported\n");
258 		return -ENODEV;
259 	}
260 
261 	if (connector->status != connector_status_connected)
262 		return -ENODEV;
263 
264 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
265 
266 	if (ret == 1) {
267 		const char *str = "unknown";
268 
269 		val &= DP_PSR_SINK_STATE_MASK;
270 		if (val < ARRAY_SIZE(sink_status))
271 			str = sink_status[val];
272 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		/* A short read (ret == 0) is a failure too, don't report success */
		return ret < 0 ? ret : -EIO;
	}
276 
277 	return 0;
278 }
279 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
280 
281 static void
282 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
283 {
284 	u32 val, status_val;
285 	const char *status = "unknown";
286 
287 	if (dev_priv->psr.psr2_enabled) {
288 		static const char * const live_status[] = {
289 			"IDLE",
290 			"CAPTURE",
291 			"CAPTURE_FS",
292 			"SLEEP",
293 			"BUFON_FW",
294 			"ML_UP",
295 			"SU_STANDBY",
296 			"FAST_SLEEP",
297 			"DEEP_SLEEP",
298 			"BUF_ON",
299 			"TG_ON"
300 		};
301 		val = intel_de_read(dev_priv,
302 				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
303 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
304 			      EDP_PSR2_STATUS_STATE_SHIFT;
305 		if (status_val < ARRAY_SIZE(live_status))
306 			status = live_status[status_val];
307 	} else {
308 		static const char * const live_status[] = {
309 			"IDLE",
310 			"SRDONACK",
311 			"SRDENT",
312 			"BUFOFF",
313 			"BUFON",
314 			"AUXACK",
315 			"SRDOFFACK",
316 			"SRDENT_ON",
317 		};
318 		val = intel_de_read(dev_priv,
319 				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
320 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
321 			      EDP_PSR_STATUS_STATE_SHIFT;
322 		if (status_val < ARRAY_SIZE(live_status))
323 			status = live_status[status_val];
324 	}
325 
326 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
327 }
328 
329 static int i915_edp_psr_status(struct seq_file *m, void *data)
330 {
331 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
332 	struct i915_psr *psr = &dev_priv->psr;
333 	intel_wakeref_t wakeref;
334 	const char *status;
335 	bool enabled;
336 	u32 val;
337 
338 	if (!HAS_PSR(dev_priv))
339 		return -ENODEV;
340 
341 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
342 	if (psr->dp)
343 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
344 	seq_puts(m, "\n");
345 
346 	if (!psr->sink_support)
347 		return 0;
348 
349 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
350 	mutex_lock(&psr->lock);
351 
352 	if (psr->enabled)
353 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
354 	else
355 		status = "disabled";
356 	seq_printf(m, "PSR mode: %s\n", status);
357 
358 	if (!psr->enabled) {
359 		seq_printf(m, "PSR sink not reliable: %s\n",
360 			   yesno(psr->sink_not_reliable));
361 
362 		goto unlock;
363 	}
364 
365 	if (psr->psr2_enabled) {
366 		val = intel_de_read(dev_priv,
367 				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
368 		enabled = val & EDP_PSR2_ENABLE;
369 	} else {
370 		val = intel_de_read(dev_priv,
371 				    EDP_PSR_CTL(dev_priv->psr.transcoder));
372 		enabled = val & EDP_PSR_ENABLE;
373 	}
374 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
375 		   enableddisabled(enabled), val);
376 	psr_source_status(dev_priv, m);
377 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
378 		   psr->busy_frontbuffer_bits);
379 
	/*
	 * The SKL+ performance counter is reset to 0 every time a DC state
	 * is entered, so only report it on HSW/BDW where it stays valid.
	 */
383 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
384 		val = intel_de_read(dev_priv,
385 				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
386 		val &= EDP_PSR_PERF_CNT_MASK;
387 		seq_printf(m, "Performance counter: %u\n", val);
388 	}
389 
390 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
391 		seq_printf(m, "Last attempted entry at: %lld\n",
392 			   psr->last_entry_attempt);
393 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
394 	}
395 
396 	if (psr->psr2_enabled) {
397 		u32 su_frames_val[3];
398 		int frame;
399 
		/*
		 * Read all 3 registers beforehand to minimize the chance of
		 * crossing a frame boundary between register reads.
		 */
404 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
405 			val = intel_de_read(dev_priv,
406 					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
407 			su_frames_val[frame / 3] = val;
408 		}
409 
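		/*
		 * Each PSR2_SU_STATUS register packs the SU block counts of
		 * three consecutive frames, unpacked per frame below.
		 */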
410 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
411 
412 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
413 			u32 su_blocks;
414 
415 			su_blocks = su_frames_val[frame / 3] &
416 				    PSR2_SU_STATUS_MASK(frame);
417 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
418 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
419 		}
420 
421 		seq_printf(m, "PSR2 selective fetch: %s\n",
422 			   enableddisabled(psr->psr2_sel_fetch_enabled));
423 	}
424 
425 unlock:
426 	mutex_unlock(&psr->lock);
427 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
428 
429 	return 0;
430 }
431 
432 static int
433 i915_edp_psr_debug_set(void *data, u64 val)
434 {
435 	struct drm_i915_private *dev_priv = data;
436 	intel_wakeref_t wakeref;
437 	int ret;
438 
439 	if (!CAN_PSR(dev_priv))
440 		return -ENODEV;
441 
442 	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
443 
444 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
445 
446 	ret = intel_psr_debug_set(dev_priv, val);
447 
448 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
449 
450 	return ret;
451 }
452 
453 static int
454 i915_edp_psr_debug_get(void *data, u64 *val)
455 {
456 	struct drm_i915_private *dev_priv = data;
457 
458 	if (!CAN_PSR(dev_priv))
459 		return -ENODEV;
460 
461 	*val = READ_ONCE(dev_priv->psr.debug);
462 	return 0;
463 }
464 
465 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
466 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
467 			"%llu\n");
468 
469 static int i915_power_domain_info(struct seq_file *m, void *unused)
470 {
471 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
472 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
473 	int i;
474 
475 	mutex_lock(&power_domains->lock);
476 
477 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
478 	for (i = 0; i < power_domains->power_well_count; i++) {
479 		struct i915_power_well *power_well;
480 		enum intel_display_power_domain power_domain;
481 
482 		power_well = &power_domains->power_wells[i];
483 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
484 			   power_well->count);
485 
486 		for_each_power_domain(power_domain, power_well->desc->domains)
487 			seq_printf(m, "  %-23s %d\n",
488 				 intel_display_power_domain_str(power_domain),
489 				 power_domains->domain_use_count[power_domain]);
490 	}
491 
492 	mutex_unlock(&power_domains->lock);
493 
494 	return 0;
495 }
496 
497 static int i915_dmc_info(struct seq_file *m, void *unused)
498 {
499 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
500 	intel_wakeref_t wakeref;
501 	struct intel_csr *csr;
502 	i915_reg_t dc5_reg, dc6_reg = {};
503 
504 	if (!HAS_CSR(dev_priv))
505 		return -ENODEV;
506 
507 	csr = &dev_priv->csr;
508 
509 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
510 
511 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
512 	seq_printf(m, "path: %s\n", csr->fw_path);
513 
514 	if (!csr->dmc_payload)
515 		goto out;
516 
517 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
518 		   CSR_VERSION_MINOR(csr->version));
519 
520 	if (INTEL_GEN(dev_priv) >= 12) {
521 		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
522 		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		/*
		 * NOTE: DMC_DEBUG3 is a general purpose register.
		 * According to Bspec 49196 the DMC firmware reuses the DC5/6
		 * counter register for DC3CO debugging and validation, but the
		 * TGL DMC firmware uses DMC_DEBUG3 as the DC3CO counter instead.
		 */
529 		seq_printf(m, "DC3CO count: %d\n",
530 			   intel_de_read(dev_priv, DMC_DEBUG3));
531 	} else {
532 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
533 						 SKL_CSR_DC3_DC5_COUNT;
534 		if (!IS_GEN9_LP(dev_priv))
535 			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
536 	}
537 
538 	seq_printf(m, "DC3 -> DC5 count: %d\n",
539 		   intel_de_read(dev_priv, dc5_reg));
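	/* dc6_reg stays zero-initialized on platforms without a DC6 counter */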
540 	if (dc6_reg.reg)
541 		seq_printf(m, "DC5 -> DC6 count: %d\n",
542 			   intel_de_read(dev_priv, dc6_reg));
543 
544 out:
545 	seq_printf(m, "program base: 0x%08x\n",
546 		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
547 	seq_printf(m, "ssp base: 0x%08x\n",
548 		   intel_de_read(dev_priv, CSR_SSP_BASE));
549 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
550 
551 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
552 
553 	return 0;
554 }
555 
556 static void intel_seq_print_mode(struct seq_file *m, int tabs,
557 				 const struct drm_display_mode *mode)
558 {
559 	int i;
560 
561 	for (i = 0; i < tabs; i++)
562 		seq_putc(m, '\t');
563 
564 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
565 }
566 
567 static void intel_encoder_info(struct seq_file *m,
568 			       struct intel_crtc *crtc,
569 			       struct intel_encoder *encoder)
570 {
571 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
572 	struct drm_connector_list_iter conn_iter;
573 	struct drm_connector *connector;
574 
575 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
576 		   encoder->base.base.id, encoder->base.name);
577 
578 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
579 	drm_for_each_connector_iter(connector, &conn_iter) {
580 		const struct drm_connector_state *conn_state =
581 			connector->state;
582 
583 		if (conn_state->best_encoder != &encoder->base)
584 			continue;
585 
586 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
587 			   connector->base.id, connector->name);
588 	}
589 	drm_connector_list_iter_end(&conn_iter);
590 }
591 
592 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
593 {
594 	const struct drm_display_mode *mode = panel->fixed_mode;
595 
596 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
597 }
598 
599 static void intel_hdcp_info(struct seq_file *m,
600 			    struct intel_connector *intel_connector)
601 {
602 	bool hdcp_cap, hdcp2_cap;
603 
604 	hdcp_cap = intel_hdcp_capable(intel_connector);
605 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
606 
607 	if (hdcp_cap)
608 		seq_puts(m, "HDCP1.4 ");
609 	if (hdcp2_cap)
610 		seq_puts(m, "HDCP2.2 ");
611 
612 	if (!hdcp_cap && !hdcp2_cap)
613 		seq_puts(m, "None");
614 
615 	seq_puts(m, "\n");
616 }
617 
618 static void intel_dp_info(struct seq_file *m,
619 			  struct intel_connector *intel_connector)
620 {
621 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
622 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
623 
624 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
625 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
626 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
627 		intel_panel_info(m, &intel_connector->panel);
628 
629 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
630 				&intel_dp->aux);
631 	if (intel_connector->hdcp.shim) {
632 		seq_puts(m, "\tHDCP version: ");
633 		intel_hdcp_info(m, intel_connector);
634 	}
635 }
636 
637 static void intel_dp_mst_info(struct seq_file *m,
638 			      struct intel_connector *intel_connector)
639 {
640 	bool has_audio = intel_connector->port->has_audio;
641 
642 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
643 }
644 
645 static void intel_hdmi_info(struct seq_file *m,
646 			    struct intel_connector *intel_connector)
647 {
648 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
649 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
650 
651 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
652 	if (intel_connector->hdcp.shim) {
653 		seq_puts(m, "\tHDCP version: ");
654 		intel_hdcp_info(m, intel_connector);
655 	}
656 }
657 
658 static void intel_lvds_info(struct seq_file *m,
659 			    struct intel_connector *intel_connector)
660 {
661 	intel_panel_info(m, &intel_connector->panel);
662 }
663 
664 static void intel_connector_info(struct seq_file *m,
665 				 struct drm_connector *connector)
666 {
667 	struct intel_connector *intel_connector = to_intel_connector(connector);
668 	const struct drm_connector_state *conn_state = connector->state;
669 	struct intel_encoder *encoder =
670 		to_intel_encoder(conn_state->best_encoder);
671 	const struct drm_display_mode *mode;
672 
673 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
674 		   connector->base.id, connector->name,
675 		   drm_get_connector_status_name(connector->status));
676 
677 	if (connector->status == connector_status_disconnected)
678 		return;
679 
680 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
681 		   connector->display_info.width_mm,
682 		   connector->display_info.height_mm);
683 	seq_printf(m, "\tsubpixel order: %s\n",
684 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
685 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
686 
687 	if (!encoder)
688 		return;
689 
690 	switch (connector->connector_type) {
691 	case DRM_MODE_CONNECTOR_DisplayPort:
692 	case DRM_MODE_CONNECTOR_eDP:
693 		if (encoder->type == INTEL_OUTPUT_DP_MST)
694 			intel_dp_mst_info(m, intel_connector);
695 		else
696 			intel_dp_info(m, intel_connector);
697 		break;
698 	case DRM_MODE_CONNECTOR_LVDS:
699 		if (encoder->type == INTEL_OUTPUT_LVDS)
700 			intel_lvds_info(m, intel_connector);
701 		break;
702 	case DRM_MODE_CONNECTOR_HDMIA:
703 		if (encoder->type == INTEL_OUTPUT_HDMI ||
704 		    encoder->type == INTEL_OUTPUT_DDI)
705 			intel_hdmi_info(m, intel_connector);
706 		break;
707 	default:
708 		break;
709 	}
710 
	seq_puts(m, "\tmodes:\n");
712 	list_for_each_entry(mode, &connector->modes, head)
713 		intel_seq_print_mode(m, 2, mode);
714 }
715 
716 static const char *plane_type(enum drm_plane_type type)
717 {
718 	switch (type) {
719 	case DRM_PLANE_TYPE_OVERLAY:
720 		return "OVL";
721 	case DRM_PLANE_TYPE_PRIMARY:
722 		return "PRI";
723 	case DRM_PLANE_TYPE_CURSOR:
724 		return "CUR";
725 	/*
726 	 * Deliberately omitting default: to generate compiler warnings
727 	 * when a new drm_plane_type gets added.
728 	 */
729 	}
730 
731 	return "unknown";
732 }
733 
734 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
735 {
	/*
	 * According to the documentation only one DRM_MODE_ROTATE_ value is
	 * allowed at a time, but print them all so misused values are visible.
	 */
740 	snprintf(buf, bufsize,
741 		 "%s%s%s%s%s%s(0x%08x)",
742 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
743 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
744 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
745 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
746 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
747 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
748 		 rotation);
749 }
750 
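/*
 * "uapi" below is the plane state as requested by userspace, while "hw" is
 * what is actually programmed into the hardware; the two are normally the
 * same but can differ, e.g. for bigjoiner configurations.
 */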
751 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
752 {
753 	const struct intel_plane_state *plane_state =
754 		to_intel_plane_state(plane->base.state);
755 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
756 	struct drm_format_name_buf format_name;
757 	struct drm_rect src, dst;
758 	char rot_str[48];
759 
760 	src = drm_plane_state_src(&plane_state->uapi);
761 	dst = drm_plane_state_dest(&plane_state->uapi);
762 
763 	if (fb)
764 		drm_get_format_name(fb->format->format, &format_name);
765 
766 	plane_rotation(rot_str, sizeof(rot_str),
767 		       plane_state->uapi.rotation);
768 
769 	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
770 		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
771 		   fb ? fb->width : 0, fb ? fb->height : 0,
772 		   DRM_RECT_FP_ARG(&src),
773 		   DRM_RECT_ARG(&dst),
774 		   rot_str);
775 }
776 
777 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
778 {
779 	const struct intel_plane_state *plane_state =
780 		to_intel_plane_state(plane->base.state);
781 	const struct drm_framebuffer *fb = plane_state->hw.fb;
782 	struct drm_format_name_buf format_name;
783 	char rot_str[48];
784 
785 	if (!fb)
786 		return;
787 
788 	drm_get_format_name(fb->format->format, &format_name);
789 
790 	plane_rotation(rot_str, sizeof(rot_str),
791 		       plane_state->hw.rotation);
792 
793 	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
794 		   fb->base.id, format_name.str,
795 		   fb->width, fb->height,
796 		   yesno(plane_state->uapi.visible),
797 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
798 		   DRM_RECT_ARG(&plane_state->uapi.dst),
799 		   rot_str);
800 }
801 
802 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
803 {
804 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
805 	struct intel_plane *plane;
806 
807 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
808 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
809 			   plane->base.base.id, plane->base.name,
810 			   plane_type(plane->base.type));
811 		intel_plane_uapi_info(m, plane);
812 		intel_plane_hw_info(m, plane);
813 	}
814 }
815 
816 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
817 {
818 	const struct intel_crtc_state *crtc_state =
819 		to_intel_crtc_state(crtc->base.state);
820 	int num_scalers = crtc->num_scalers;
821 	int i;
822 
	/* Not all platforms have a scaler */
824 	if (num_scalers) {
825 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
826 			   num_scalers,
827 			   crtc_state->scaler_state.scaler_users,
828 			   crtc_state->scaler_state.scaler_id);
829 
830 		for (i = 0; i < num_scalers; i++) {
831 			const struct intel_scaler *sc =
832 				&crtc_state->scaler_state.scalers[i];
833 
834 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
835 				   i, yesno(sc->in_use), sc->mode);
836 		}
837 		seq_puts(m, "\n");
838 	} else {
839 		seq_puts(m, "\tNo scalers available on this platform\n");
840 	}
841 }
842 
843 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
844 {
845 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
846 	const struct intel_crtc_state *crtc_state =
847 		to_intel_crtc_state(crtc->base.state);
848 	struct intel_encoder *encoder;
849 
850 	seq_printf(m, "[CRTC:%d:%s]:\n",
851 		   crtc->base.base.id, crtc->base.name);
852 
853 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
854 		   yesno(crtc_state->uapi.enable),
855 		   yesno(crtc_state->uapi.active),
856 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
857 
858 	if (crtc_state->hw.enable) {
859 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
860 			   yesno(crtc_state->hw.active),
861 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
862 
863 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
864 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
865 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
866 
867 		intel_scaler_info(m, crtc);
868 	}
869 
870 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
871 				    crtc_state->uapi.encoder_mask)
872 		intel_encoder_info(m, crtc, encoder);
873 
874 	intel_plane_info(m, crtc);
875 
876 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
877 		   yesno(!crtc->cpu_fifo_underrun_disabled),
878 		   yesno(!crtc->pch_fifo_underrun_disabled));
879 }
880 
881 static int i915_display_info(struct seq_file *m, void *unused)
882 {
883 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
884 	struct drm_device *dev = &dev_priv->drm;
885 	struct intel_crtc *crtc;
886 	struct drm_connector *connector;
887 	struct drm_connector_list_iter conn_iter;
888 	intel_wakeref_t wakeref;
889 
890 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
891 
892 	drm_modeset_lock_all(dev);
893 
	seq_puts(m, "CRTC info\n");
	seq_puts(m, "---------\n");
896 	for_each_intel_crtc(dev, crtc)
897 		intel_crtc_info(m, crtc);
898 
	seq_puts(m, "\n");
	seq_puts(m, "Connector info\n");
	seq_puts(m, "--------------\n");
902 	drm_connector_list_iter_begin(dev, &conn_iter);
903 	drm_for_each_connector_iter(connector, &conn_iter)
904 		intel_connector_info(m, connector);
905 	drm_connector_list_iter_end(&conn_iter);
906 
907 	drm_modeset_unlock_all(dev);
908 
909 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
910 
911 	return 0;
912 }
913 
914 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
915 {
916 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
917 	struct drm_device *dev = &dev_priv->drm;
918 	int i;
919 
920 	drm_modeset_lock_all(dev);
921 
922 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
923 		   dev_priv->dpll.ref_clks.nssc,
924 		   dev_priv->dpll.ref_clks.ssc);
925 
926 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
927 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
928 
929 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
930 			   pll->info->id);
931 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
932 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_puts(m, " tracked hardware state:\n");
934 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
935 		seq_printf(m, " dpll_md: 0x%08x\n",
936 			   pll->state.hw_state.dpll_md);
937 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
938 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
939 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
940 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
941 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
942 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
943 			   pll->state.hw_state.mg_refclkin_ctl);
944 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
945 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
946 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
947 			   pll->state.hw_state.mg_clktop2_hsclkctl);
948 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
949 			   pll->state.hw_state.mg_pll_div0);
950 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
951 			   pll->state.hw_state.mg_pll_div1);
952 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
953 			   pll->state.hw_state.mg_pll_lf);
954 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
955 			   pll->state.hw_state.mg_pll_frac_lock);
956 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
957 			   pll->state.hw_state.mg_pll_ssc);
958 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
959 			   pll->state.hw_state.mg_pll_bias);
960 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
961 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
962 	}
963 	drm_modeset_unlock_all(dev);
964 
965 	return 0;
966 }
967 
968 static int i915_ipc_status_show(struct seq_file *m, void *data)
969 {
970 	struct drm_i915_private *dev_priv = m->private;
971 
972 	seq_printf(m, "Isochronous Priority Control: %s\n",
973 			yesno(dev_priv->ipc_enabled));
974 	return 0;
975 }
976 
977 static int i915_ipc_status_open(struct inode *inode, struct file *file)
978 {
979 	struct drm_i915_private *dev_priv = inode->i_private;
980 
981 	if (!HAS_IPC(dev_priv))
982 		return -ENODEV;
983 
984 	return single_open(file, i915_ipc_status_show, dev_priv);
985 }
986 
987 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
988 				     size_t len, loff_t *offp)
989 {
990 	struct seq_file *m = file->private_data;
991 	struct drm_i915_private *dev_priv = m->private;
992 	intel_wakeref_t wakeref;
993 	bool enable;
994 	int ret;
995 
996 	ret = kstrtobool_from_user(ubuf, len, &enable);
997 	if (ret < 0)
998 		return ret;
999 
1000 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1001 		if (!dev_priv->ipc_enabled && enable)
1002 			drm_info(&dev_priv->drm,
1003 				 "Enabling IPC: WM will be proper only after next commit\n");
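		/* Force watermarks to be recomputed from scratch on the next commit */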
1004 		dev_priv->wm.distrust_bios_wm = true;
1005 		dev_priv->ipc_enabled = enable;
1006 		intel_enable_ipc(dev_priv);
1007 	}
1008 
1009 	return len;
1010 }
1011 
1012 static const struct file_operations i915_ipc_status_fops = {
1013 	.owner = THIS_MODULE,
1014 	.open = i915_ipc_status_open,
1015 	.read = seq_read,
1016 	.llseek = seq_lseek,
1017 	.release = single_release,
1018 	.write = i915_ipc_status_write
1019 };
1020 
1021 static int i915_ddb_info(struct seq_file *m, void *unused)
1022 {
1023 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1024 	struct drm_device *dev = &dev_priv->drm;
1025 	struct skl_ddb_entry *entry;
1026 	struct intel_crtc *crtc;
1027 
1028 	if (INTEL_GEN(dev_priv) < 9)
1029 		return -ENODEV;
1030 
1031 	drm_modeset_lock_all(dev);
1032 
1033 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1034 
1035 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1036 		struct intel_crtc_state *crtc_state =
1037 			to_intel_crtc_state(crtc->base.state);
1038 		enum pipe pipe = crtc->pipe;
1039 		enum plane_id plane_id;
1040 
1041 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1042 
1043 		for_each_plane_id_on_crtc(crtc, plane_id) {
1044 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1045 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1046 				   entry->start, entry->end,
1047 				   skl_ddb_entry_size(entry));
1048 		}
1049 
1050 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1051 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1052 			   entry->end, skl_ddb_entry_size(entry));
1053 	}
1054 
1055 	drm_modeset_unlock_all(dev);
1056 
1057 	return 0;
1058 }
1059 
1060 static void drrs_status_per_crtc(struct seq_file *m,
1061 				 struct drm_device *dev,
1062 				 struct intel_crtc *intel_crtc)
1063 {
1064 	struct drm_i915_private *dev_priv = to_i915(dev);
1065 	struct i915_drrs *drrs = &dev_priv->drrs;
1066 	int vrefresh = 0;
1067 	struct drm_connector *connector;
1068 	struct drm_connector_list_iter conn_iter;
1069 
1070 	drm_connector_list_iter_begin(dev, &conn_iter);
1071 	drm_for_each_connector_iter(connector, &conn_iter) {
1072 		if (connector->state->crtc != &intel_crtc->base)
1073 			continue;
1074 
1075 		seq_printf(m, "%s:\n", connector->name);
1076 	}
1077 	drm_connector_list_iter_end(&conn_iter);
1078 
1079 	seq_puts(m, "\n");
1080 
1081 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1082 		struct intel_panel *panel;
1083 
1084 		mutex_lock(&drrs->mutex);
1085 		/* DRRS Supported */
1086 		seq_puts(m, "\tDRRS Supported: Yes\n");
1087 
1088 		/* disable_drrs() will make drrs->dp NULL */
1089 		if (!drrs->dp) {
1090 			seq_puts(m, "Idleness DRRS: Disabled\n");
1091 			if (dev_priv->psr.enabled)
1092 				seq_puts(m,
1093 				"\tAs PSR is enabled, DRRS is not enabled\n");
1094 			mutex_unlock(&drrs->mutex);
1095 			return;
1096 		}
1097 
1098 		panel = &drrs->dp->attached_connector->panel;
1099 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1100 					drrs->busy_frontbuffer_bits);
1101 
1102 		seq_puts(m, "\n\t\t");
1103 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1104 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1105 			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1106 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1107 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1108 			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1109 		} else {
1110 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1111 						drrs->refresh_rate_type);
1112 			mutex_unlock(&drrs->mutex);
1113 			return;
1114 		}
1115 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1116 
1117 		seq_puts(m, "\n\t\t");
1118 		mutex_unlock(&drrs->mutex);
1119 	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Supported: No");
1122 	}
1123 	seq_puts(m, "\n");
1124 }
1125 
1126 static int i915_drrs_status(struct seq_file *m, void *unused)
1127 {
1128 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1129 	struct drm_device *dev = &dev_priv->drm;
1130 	struct intel_crtc *intel_crtc;
1131 	int active_crtc_cnt = 0;
1132 
1133 	drm_modeset_lock_all(dev);
1134 	for_each_intel_crtc(dev, intel_crtc) {
1135 		if (intel_crtc->base.state->active) {
1136 			active_crtc_cnt++;
1137 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1138 
1139 			drrs_status_per_crtc(m, dev, intel_crtc);
1140 		}
1141 	}
1142 	drm_modeset_unlock_all(dev);
1143 
1144 	if (!active_crtc_cnt)
1145 		seq_puts(m, "No active crtc found\n");
1146 
1147 	return 0;
1148 }
1149 
#define LPSP_STATUS(COND) ((COND) ? seq_puts(m, "LPSP: enabled\n") : \
				    seq_puts(m, "LPSP: disabled\n"))
1152 
1153 static bool
1154 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1155 			      enum i915_power_well_id power_well_id)
1156 {
1157 	intel_wakeref_t wakeref;
1158 	bool is_enabled;
1159 
1160 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1161 	is_enabled = intel_display_power_well_is_enabled(i915,
1162 							 power_well_id);
1163 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1164 
1165 	return is_enabled;
1166 }
1167 
1168 static int i915_lpsp_status(struct seq_file *m, void *unused)
1169 {
1170 	struct drm_i915_private *i915 = node_to_i915(m->private);
1171 
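	/*
	 * LPSP (low power single pipe) is only possible while the platform's
	 * wider display power well is powered down, so report it that way.
	 */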
1172 	switch (INTEL_GEN(i915)) {
1173 	case 12:
1174 	case 11:
1175 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1176 		break;
1177 	case 10:
1178 	case 9:
1179 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1180 		break;
1181 	default:
		/*
		 * Apart from HASWELL/BROADWELL, other legacy platforms don't
		 * support LPSP.
		 */
1186 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1187 			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1188 		else
1189 			seq_puts(m, "LPSP: not supported\n");
1190 	}
1191 
1192 	return 0;
1193 }
1194 
1195 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1196 {
1197 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1198 	struct drm_device *dev = &dev_priv->drm;
1199 	struct intel_encoder *intel_encoder;
1200 	struct intel_digital_port *dig_port;
1201 	struct drm_connector *connector;
1202 	struct drm_connector_list_iter conn_iter;
1203 
1204 	drm_connector_list_iter_begin(dev, &conn_iter);
1205 	drm_for_each_connector_iter(connector, &conn_iter) {
1206 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1207 			continue;
1208 
1209 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1210 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1211 			continue;
1212 
1213 		dig_port = enc_to_dig_port(intel_encoder);
1214 		if (!dig_port->dp.can_mst)
1215 			continue;
1216 
1217 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1218 			   dig_port->base.base.base.id,
1219 			   dig_port->base.base.name);
1220 		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1221 	}
1222 	drm_connector_list_iter_end(&conn_iter);
1223 
1224 	return 0;
1225 }
1226 
1227 static ssize_t i915_displayport_test_active_write(struct file *file,
1228 						  const char __user *ubuf,
1229 						  size_t len, loff_t *offp)
1230 {
1231 	char *input_buffer;
1232 	int status = 0;
1233 	struct drm_device *dev;
1234 	struct drm_connector *connector;
1235 	struct drm_connector_list_iter conn_iter;
1236 	struct intel_dp *intel_dp;
1237 	int val = 0;
1238 
1239 	dev = ((struct seq_file *)file->private_data)->private;
1240 
1241 	if (len == 0)
1242 		return 0;
1243 
1244 	input_buffer = memdup_user_nul(ubuf, len);
1245 	if (IS_ERR(input_buffer))
1246 		return PTR_ERR(input_buffer);
1247 
1248 	drm_dbg(&to_i915(dev)->drm,
1249 		"Copied %d bytes from user\n", (unsigned int)len);
1250 
1251 	drm_connector_list_iter_begin(dev, &conn_iter);
1252 	drm_for_each_connector_iter(connector, &conn_iter) {
1253 		struct intel_encoder *encoder;
1254 
1255 		if (connector->connector_type !=
1256 		    DRM_MODE_CONNECTOR_DisplayPort)
1257 			continue;
1258 
1259 		encoder = to_intel_encoder(connector->encoder);
1260 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1261 			continue;
1262 
1263 		if (encoder && connector->status == connector_status_connected) {
1264 			intel_dp = enc_to_intel_dp(encoder);
1265 			status = kstrtoint(input_buffer, 10, &val);
1266 			if (status < 0)
1267 				break;
1268 			drm_dbg(&to_i915(dev)->drm,
1269 				"Got %d for test active\n", val);
1270 			/* To prevent erroneous activation of the compliance
1271 			 * testing code, only accept an actual value of 1 here
1272 			 */
1273 			if (val == 1)
1274 				intel_dp->compliance.test_active = true;
1275 			else
1276 				intel_dp->compliance.test_active = false;
1277 		}
1278 	}
1279 	drm_connector_list_iter_end(&conn_iter);
1280 	kfree(input_buffer);
1281 	if (status < 0)
1282 		return status;
1283 
1284 	*offp += len;
1285 	return len;
1286 }
1287 
1288 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1289 {
1290 	struct drm_i915_private *dev_priv = m->private;
1291 	struct drm_device *dev = &dev_priv->drm;
1292 	struct drm_connector *connector;
1293 	struct drm_connector_list_iter conn_iter;
1294 	struct intel_dp *intel_dp;
1295 
1296 	drm_connector_list_iter_begin(dev, &conn_iter);
1297 	drm_for_each_connector_iter(connector, &conn_iter) {
1298 		struct intel_encoder *encoder;
1299 
1300 		if (connector->connector_type !=
1301 		    DRM_MODE_CONNECTOR_DisplayPort)
1302 			continue;
1303 
1304 		encoder = to_intel_encoder(connector->encoder);
1305 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1306 			continue;
1307 
1308 		if (encoder && connector->status == connector_status_connected) {
1309 			intel_dp = enc_to_intel_dp(encoder);
1310 			if (intel_dp->compliance.test_active)
1311 				seq_puts(m, "1");
1312 			else
1313 				seq_puts(m, "0");
1314 		} else
1315 			seq_puts(m, "0");
1316 	}
1317 	drm_connector_list_iter_end(&conn_iter);
1318 
1319 	return 0;
1320 }
1321 
1322 static int i915_displayport_test_active_open(struct inode *inode,
1323 					     struct file *file)
1324 {
1325 	return single_open(file, i915_displayport_test_active_show,
1326 			   inode->i_private);
1327 }
1328 
1329 static const struct file_operations i915_displayport_test_active_fops = {
1330 	.owner = THIS_MODULE,
1331 	.open = i915_displayport_test_active_open,
1332 	.read = seq_read,
1333 	.llseek = seq_lseek,
1334 	.release = single_release,
1335 	.write = i915_displayport_test_active_write
1336 };
1337 
1338 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1339 {
1340 	struct drm_i915_private *dev_priv = m->private;
1341 	struct drm_device *dev = &dev_priv->drm;
1342 	struct drm_connector *connector;
1343 	struct drm_connector_list_iter conn_iter;
1344 	struct intel_dp *intel_dp;
1345 
1346 	drm_connector_list_iter_begin(dev, &conn_iter);
1347 	drm_for_each_connector_iter(connector, &conn_iter) {
1348 		struct intel_encoder *encoder;
1349 
1350 		if (connector->connector_type !=
1351 		    DRM_MODE_CONNECTOR_DisplayPort)
1352 			continue;
1353 
1354 		encoder = to_intel_encoder(connector->encoder);
1355 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1356 			continue;
1357 
1358 		if (encoder && connector->status == connector_status_connected) {
1359 			intel_dp = enc_to_intel_dp(encoder);
1360 			if (intel_dp->compliance.test_type ==
1361 			    DP_TEST_LINK_EDID_READ)
1362 				seq_printf(m, "%lx",
1363 					   intel_dp->compliance.test_data.edid);
1364 			else if (intel_dp->compliance.test_type ==
1365 				 DP_TEST_LINK_VIDEO_PATTERN) {
1366 				seq_printf(m, "hdisplay: %d\n",
1367 					   intel_dp->compliance.test_data.hdisplay);
1368 				seq_printf(m, "vdisplay: %d\n",
1369 					   intel_dp->compliance.test_data.vdisplay);
1370 				seq_printf(m, "bpc: %u\n",
1371 					   intel_dp->compliance.test_data.bpc);
1372 			} else if (intel_dp->compliance.test_type ==
1373 				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1374 				seq_printf(m, "pattern: %d\n",
1375 					   intel_dp->compliance.test_data.phytest.phy_pattern);
1376 				seq_printf(m, "Number of lanes: %d\n",
1377 					   intel_dp->compliance.test_data.phytest.num_lanes);
1378 				seq_printf(m, "Link Rate: %d\n",
1379 					   intel_dp->compliance.test_data.phytest.link_rate);
1380 				seq_printf(m, "level: %02x\n",
1381 					   intel_dp->train_set[0]);
1382 			}
1383 		} else
1384 			seq_puts(m, "0");
1385 	}
1386 	drm_connector_list_iter_end(&conn_iter);
1387 
1388 	return 0;
1389 }
1390 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1391 
1392 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1393 {
1394 	struct drm_i915_private *dev_priv = m->private;
1395 	struct drm_device *dev = &dev_priv->drm;
1396 	struct drm_connector *connector;
1397 	struct drm_connector_list_iter conn_iter;
1398 	struct intel_dp *intel_dp;
1399 
1400 	drm_connector_list_iter_begin(dev, &conn_iter);
1401 	drm_for_each_connector_iter(connector, &conn_iter) {
1402 		struct intel_encoder *encoder;
1403 
1404 		if (connector->connector_type !=
1405 		    DRM_MODE_CONNECTOR_DisplayPort)
1406 			continue;
1407 
1408 		encoder = to_intel_encoder(connector->encoder);
1409 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1410 			continue;
1411 
1412 		if (encoder && connector->status == connector_status_connected) {
1413 			intel_dp = enc_to_intel_dp(encoder);
1414 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1415 		} else
1416 			seq_puts(m, "0");
1417 	}
1418 	drm_connector_list_iter_end(&conn_iter);
1419 
1420 	return 0;
1421 }
1422 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1423 
1424 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1425 {
1426 	struct drm_i915_private *dev_priv = m->private;
1427 	struct drm_device *dev = &dev_priv->drm;
1428 	int level;
1429 	int num_levels;
1430 
1431 	if (IS_CHERRYVIEW(dev_priv))
1432 		num_levels = 3;
1433 	else if (IS_VALLEYVIEW(dev_priv))
1434 		num_levels = 1;
1435 	else if (IS_G4X(dev_priv))
1436 		num_levels = 3;
1437 	else
1438 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1439 
1440 	drm_modeset_lock_all(dev);
1441 
1442 	for (level = 0; level < num_levels; level++) {
1443 		unsigned int latency = wm[level];
1444 
		/*
		 * - WM1+ latency values are stored in 0.5us units
		 * - latencies are stored in 1us units on gen9+/vlv/chv/g4x
		 * Scale everything to 0.1us units for printing below.
		 */
1449 		if (INTEL_GEN(dev_priv) >= 9 ||
1450 		    IS_VALLEYVIEW(dev_priv) ||
1451 		    IS_CHERRYVIEW(dev_priv) ||
1452 		    IS_G4X(dev_priv))
1453 			latency *= 10;
1454 		else if (level > 0)
1455 			latency *= 5;
1456 
1457 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1458 			   level, wm[level], latency / 10, latency % 10);
1459 	}
1460 
1461 	drm_modeset_unlock_all(dev);
1462 }
1463 
1464 static int pri_wm_latency_show(struct seq_file *m, void *data)
1465 {
1466 	struct drm_i915_private *dev_priv = m->private;
1467 	const u16 *latencies;
1468 
1469 	if (INTEL_GEN(dev_priv) >= 9)
1470 		latencies = dev_priv->wm.skl_latency;
1471 	else
1472 		latencies = dev_priv->wm.pri_latency;
1473 
1474 	wm_latency_show(m, latencies);
1475 
1476 	return 0;
1477 }
1478 
1479 static int spr_wm_latency_show(struct seq_file *m, void *data)
1480 {
1481 	struct drm_i915_private *dev_priv = m->private;
1482 	const u16 *latencies;
1483 
1484 	if (INTEL_GEN(dev_priv) >= 9)
1485 		latencies = dev_priv->wm.skl_latency;
1486 	else
1487 		latencies = dev_priv->wm.spr_latency;
1488 
1489 	wm_latency_show(m, latencies);
1490 
1491 	return 0;
1492 }
1493 
1494 static int cur_wm_latency_show(struct seq_file *m, void *data)
1495 {
1496 	struct drm_i915_private *dev_priv = m->private;
1497 	const u16 *latencies;
1498 
1499 	if (INTEL_GEN(dev_priv) >= 9)
1500 		latencies = dev_priv->wm.skl_latency;
1501 	else
1502 		latencies = dev_priv->wm.cur_latency;
1503 
1504 	wm_latency_show(m, latencies);
1505 
1506 	return 0;
1507 }
1508 
1509 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1510 {
1511 	struct drm_i915_private *dev_priv = inode->i_private;
1512 
1513 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
1514 		return -ENODEV;
1515 
1516 	return single_open(file, pri_wm_latency_show, dev_priv);
1517 }
1518 
1519 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1520 {
1521 	struct drm_i915_private *dev_priv = inode->i_private;
1522 
1523 	if (HAS_GMCH(dev_priv))
1524 		return -ENODEV;
1525 
1526 	return single_open(file, spr_wm_latency_show, dev_priv);
1527 }
1528 
1529 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1530 {
1531 	struct drm_i915_private *dev_priv = inode->i_private;
1532 
1533 	if (HAS_GMCH(dev_priv))
1534 		return -ENODEV;
1535 
1536 	return single_open(file, cur_wm_latency_show, dev_priv);
1537 }
1538 
1539 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1540 				size_t len, loff_t *offp, u16 wm[8])
1541 {
1542 	struct seq_file *m = file->private_data;
1543 	struct drm_i915_private *dev_priv = m->private;
1544 	struct drm_device *dev = &dev_priv->drm;
1545 	u16 new[8] = { 0 };
1546 	int num_levels;
1547 	int level;
1548 	int ret;
1549 	char tmp[32];
1550 
1551 	if (IS_CHERRYVIEW(dev_priv))
1552 		num_levels = 3;
1553 	else if (IS_VALLEYVIEW(dev_priv))
1554 		num_levels = 1;
1555 	else if (IS_G4X(dev_priv))
1556 		num_levels = 3;
1557 	else
1558 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1559 
1560 	if (len >= sizeof(tmp))
1561 		return -EINVAL;
1562 
1563 	if (copy_from_user(tmp, ubuf, len))
1564 		return -EFAULT;
1565 
1566 	tmp[len] = '\0';
1567 
1568 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1569 		     &new[0], &new[1], &new[2], &new[3],
1570 		     &new[4], &new[5], &new[6], &new[7]);
1571 	if (ret != num_levels)
1572 		return -EINVAL;
1573 
1574 	drm_modeset_lock_all(dev);
1575 
1576 	for (level = 0; level < num_levels; level++)
1577 		wm[level] = new[level];
1578 
1579 	drm_modeset_unlock_all(dev);
1580 
1581 	return len;
1582 }
1583 
1584 
1585 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1586 				    size_t len, loff_t *offp)
1587 {
1588 	struct seq_file *m = file->private_data;
1589 	struct drm_i915_private *dev_priv = m->private;
1590 	u16 *latencies;
1591 
1592 	if (INTEL_GEN(dev_priv) >= 9)
1593 		latencies = dev_priv->wm.skl_latency;
1594 	else
1595 		latencies = dev_priv->wm.pri_latency;
1596 
1597 	return wm_latency_write(file, ubuf, len, offp, latencies);
1598 }
1599 
1600 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1601 				    size_t len, loff_t *offp)
1602 {
1603 	struct seq_file *m = file->private_data;
1604 	struct drm_i915_private *dev_priv = m->private;
1605 	u16 *latencies;
1606 
1607 	if (INTEL_GEN(dev_priv) >= 9)
1608 		latencies = dev_priv->wm.skl_latency;
1609 	else
1610 		latencies = dev_priv->wm.spr_latency;
1611 
1612 	return wm_latency_write(file, ubuf, len, offp, latencies);
1613 }
1614 
1615 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1616 				    size_t len, loff_t *offp)
1617 {
1618 	struct seq_file *m = file->private_data;
1619 	struct drm_i915_private *dev_priv = m->private;
1620 	u16 *latencies;
1621 
1622 	if (INTEL_GEN(dev_priv) >= 9)
1623 		latencies = dev_priv->wm.skl_latency;
1624 	else
1625 		latencies = dev_priv->wm.cur_latency;
1626 
1627 	return wm_latency_write(file, ubuf, len, offp, latencies);
1628 }
1629 
1630 static const struct file_operations i915_pri_wm_latency_fops = {
1631 	.owner = THIS_MODULE,
1632 	.open = pri_wm_latency_open,
1633 	.read = seq_read,
1634 	.llseek = seq_lseek,
1635 	.release = single_release,
1636 	.write = pri_wm_latency_write
1637 };
1638 
1639 static const struct file_operations i915_spr_wm_latency_fops = {
1640 	.owner = THIS_MODULE,
1641 	.open = spr_wm_latency_open,
1642 	.read = seq_read,
1643 	.llseek = seq_lseek,
1644 	.release = single_release,
1645 	.write = spr_wm_latency_write
1646 };
1647 
1648 static const struct file_operations i915_cur_wm_latency_fops = {
1649 	.owner = THIS_MODULE,
1650 	.open = cur_wm_latency_open,
1651 	.read = seq_read,
1652 	.llseek = seq_lseek,
1653 	.release = single_release,
1654 	.write = cur_wm_latency_write
1655 };
1656 
1657 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1658 {
1659 	struct drm_i915_private *dev_priv = m->private;
1660 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1661 
1662 	/* Synchronize with everything first in case there's been an HPD
1663 	 * storm, but we haven't finished handling it in the kernel yet
1664 	 */
1665 	intel_synchronize_irq(dev_priv);
1666 	flush_work(&dev_priv->hotplug.dig_port_work);
1667 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1668 
1669 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1670 	seq_printf(m, "Detected: %s\n",
1671 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1672 
1673 	return 0;
1674 }
1675 
1676 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1677 					const char __user *ubuf, size_t len,
1678 					loff_t *offp)
1679 {
1680 	struct seq_file *m = file->private_data;
1681 	struct drm_i915_private *dev_priv = m->private;
1682 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1683 	unsigned int new_threshold;
1684 	int i;
1685 	char *newline;
1686 	char tmp[16];
1687 
1688 	if (len >= sizeof(tmp))
1689 		return -EINVAL;
1690 
1691 	if (copy_from_user(tmp, ubuf, len))
1692 		return -EFAULT;
1693 
1694 	tmp[len] = '\0';
1695 
1696 	/* Strip newline, if any */
1697 	newline = strchr(tmp, '\n');
1698 	if (newline)
1699 		*newline = '\0';
1700 
1701 	if (strcmp(tmp, "reset") == 0)
1702 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1703 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1704 		return -EINVAL;
1705 
1706 	if (new_threshold > 0)
1707 		drm_dbg_kms(&dev_priv->drm,
1708 			    "Setting HPD storm detection threshold to %d\n",
1709 			    new_threshold);
1710 	else
1711 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1712 
1713 	spin_lock_irq(&dev_priv->irq_lock);
1714 	hotplug->hpd_storm_threshold = new_threshold;
1715 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1716 	for_each_hpd_pin(i)
1717 		hotplug->stats[i].count = 0;
1718 	spin_unlock_irq(&dev_priv->irq_lock);
1719 
1720 	/* Re-enable hpd immediately if we were in an irq storm */
1721 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1722 
1723 	return len;
1724 }
1725 
1726 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1727 {
1728 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1729 }
1730 
1731 static const struct file_operations i915_hpd_storm_ctl_fops = {
1732 	.owner = THIS_MODULE,
1733 	.open = i915_hpd_storm_ctl_open,
1734 	.read = seq_read,
1735 	.llseek = seq_lseek,
1736 	.release = single_release,
1737 	.write = i915_hpd_storm_ctl_write
1738 };
1739 
1740 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1741 {
1742 	struct drm_i915_private *dev_priv = m->private;
1743 
1744 	seq_printf(m, "Enabled: %s\n",
1745 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1746 
1747 	return 0;
1748 }
1749 
1750 static int
1751 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1752 {
1753 	return single_open(file, i915_hpd_short_storm_ctl_show,
1754 			   inode->i_private);
1755 }
1756 
1757 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1758 					      const char __user *ubuf,
1759 					      size_t len, loff_t *offp)
1760 {
1761 	struct seq_file *m = file->private_data;
1762 	struct drm_i915_private *dev_priv = m->private;
1763 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1764 	char *newline;
1765 	char tmp[16];
1766 	int i;
1767 	bool new_state;
1768 
1769 	if (len >= sizeof(tmp))
1770 		return -EINVAL;
1771 
1772 	if (copy_from_user(tmp, ubuf, len))
1773 		return -EFAULT;
1774 
1775 	tmp[len] = '\0';
1776 
1777 	/* Strip newline, if any */
1778 	newline = strchr(tmp, '\n');
1779 	if (newline)
1780 		*newline = '\0';
1781 
1782 	/* Reset to the "default" state for this system */
1783 	if (strcmp(tmp, "reset") == 0)
1784 		new_state = !HAS_DP_MST(dev_priv);
1785 	else if (kstrtobool(tmp, &new_state) != 0)
1786 		return -EINVAL;
1787 
1788 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1789 		    new_state ? "En" : "Dis");
1790 
1791 	spin_lock_irq(&dev_priv->irq_lock);
1792 	hotplug->hpd_short_storm_enabled = new_state;
1793 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1794 	for_each_hpd_pin(i)
1795 		hotplug->stats[i].count = 0;
1796 	spin_unlock_irq(&dev_priv->irq_lock);
1797 
1798 	/* Re-enable hpd immediately if we were in an irq storm */
1799 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1800 
1801 	return len;
1802 }
1803 
1804 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1805 	.owner = THIS_MODULE,
1806 	.open = i915_hpd_short_storm_ctl_open,
1807 	.read = seq_read,
1808 	.llseek = seq_lseek,
1809 	.release = single_release,
1810 	.write = i915_hpd_short_storm_ctl_write,
1811 };
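
/*
 * Illustrative usage (debugfs path assumed as above): reading reports whether
 * short-pulse HPD storm detection is enabled; writing a boolean ("0"/"1",
 * "y"/"n") forces it, and writing "reset" restores the platform default,
 * which is enabled only where DP-MST is not supported. As with the long-pulse
 * control above, any write clears the HPD storm statistics.
 */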
1812 
1813 static int i915_drrs_ctl_set(void *data, u64 val)
1814 {
1815 	struct drm_i915_private *dev_priv = data;
1816 	struct drm_device *dev = &dev_priv->drm;
1817 	struct intel_crtc *crtc;
1818 
1819 	if (INTEL_GEN(dev_priv) < 7)
1820 		return -ENODEV;
1821 
1822 	for_each_intel_crtc(dev, crtc) {
1823 		struct drm_connector_list_iter conn_iter;
1824 		struct intel_crtc_state *crtc_state;
1825 		struct drm_connector *connector;
1826 		struct drm_crtc_commit *commit;
1827 		int ret;
1828 
1829 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
1830 		if (ret)
1831 			return ret;
1832 
1833 		crtc_state = to_intel_crtc_state(crtc->base.state);
1834 
1835 		if (!crtc_state->hw.active ||
1836 		    !crtc_state->has_drrs)
1837 			goto out;
1838 
1839 		commit = crtc_state->uapi.commit;
1840 		if (commit) {
1841 			ret = wait_for_completion_interruptible(&commit->hw_done);
1842 			if (ret)
1843 				goto out;
1844 		}
1845 
1846 		drm_connector_list_iter_begin(dev, &conn_iter);
1847 		drm_for_each_connector_iter(connector, &conn_iter) {
1848 			struct intel_encoder *encoder;
1849 			struct intel_dp *intel_dp;
1850 
1851 			if (!(crtc_state->uapi.connector_mask &
1852 			      drm_connector_mask(connector)))
1853 				continue;
1854 
1855 			encoder = intel_attached_encoder(to_intel_connector(connector));
1856 			if (encoder->type != INTEL_OUTPUT_EDP)
1857 				continue;
1858 
1859 			drm_dbg(&dev_priv->drm,
1860 				"Manually %sabling DRRS. %llu\n",
1861 				val ? "en" : "dis", val);
1862 
1863 			intel_dp = enc_to_intel_dp(encoder);
1864 			if (val)
1865 				intel_edp_drrs_enable(intel_dp,
1866 						      crtc_state);
1867 			else
1868 				intel_edp_drrs_disable(intel_dp,
1869 						       crtc_state);
1870 		}
1871 		drm_connector_list_iter_end(&conn_iter);
1872 
1873 out:
1874 		drm_modeset_unlock(&crtc->base.mutex);
1875 		if (ret)
1876 			return ret;
1877 	}
1878 
1879 	return 0;
1880 }
1881 
1882 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
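
/*
 * Illustrative usage (debugfs path assumed as above): writing a non-zero
 * value manually enables DRRS on every active eDP pipe that supports it,
 * and writing 0 disables it again, e.g.:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */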
1883 
1884 static ssize_t
1885 i915_fifo_underrun_reset_write(struct file *filp,
1886 			       const char __user *ubuf,
1887 			       size_t cnt, loff_t *ppos)
1888 {
1889 	struct drm_i915_private *dev_priv = filp->private_data;
1890 	struct intel_crtc *intel_crtc;
1891 	struct drm_device *dev = &dev_priv->drm;
1892 	int ret;
1893 	bool reset;
1894 
1895 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
1896 	if (ret)
1897 		return ret;
1898 
1899 	if (!reset)
1900 		return cnt;
1901 
1902 	for_each_intel_crtc(dev, intel_crtc) {
1903 		struct drm_crtc_commit *commit;
1904 		struct intel_crtc_state *crtc_state;
1905 
1906 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
1907 		if (ret)
1908 			return ret;
1909 
1910 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
1911 		commit = crtc_state->uapi.commit;
1912 		if (commit) {
1913 			ret = wait_for_completion_interruptible(&commit->hw_done);
1914 			if (!ret)
1915 				ret = wait_for_completion_interruptible(&commit->flip_done);
1916 		}
1917 
1918 		if (!ret && crtc_state->hw.active) {
1919 			drm_dbg_kms(&dev_priv->drm,
1920 				    "Re-arming FIFO underruns on pipe %c\n",
1921 				    pipe_name(intel_crtc->pipe));
1922 
1923 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
1924 		}
1925 
1926 		drm_modeset_unlock(&intel_crtc->base.mutex);
1927 
1928 		if (ret)
1929 			return ret;
1930 	}
1931 
1932 	ret = intel_fbc_reset_underrun(dev_priv);
1933 	if (ret)
1934 		return ret;
1935 
1936 	return cnt;
1937 }
1938 
1939 static const struct file_operations i915_fifo_underrun_reset_ops = {
1940 	.owner = THIS_MODULE,
1941 	.open = simple_open,
1942 	.write = i915_fifo_underrun_reset_write,
1943 	.llseek = default_llseek,
1944 };
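
/*
 * Illustrative usage (debugfs path assumed as above): writing a true value
 * re-arms FIFO underrun reporting on every active pipe and clears any FBC
 * underrun state; writing a false value is a no-op.
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */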
1945 
1946 static const struct drm_info_list intel_display_debugfs_list[] = {
1947 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
1948 	{"i915_fbc_status", i915_fbc_status, 0},
1949 	{"i915_ips_status", i915_ips_status, 0},
1950 	{"i915_sr_status", i915_sr_status, 0},
1951 	{"i915_opregion", i915_opregion, 0},
1952 	{"i915_vbt", i915_vbt, 0},
1953 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1954 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
1955 	{"i915_power_domain_info", i915_power_domain_info, 0},
1956 	{"i915_dmc_info", i915_dmc_info, 0},
1957 	{"i915_display_info", i915_display_info, 0},
1958 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
1959 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
1960 	{"i915_ddb_info", i915_ddb_info, 0},
1961 	{"i915_drrs_status", i915_drrs_status, 0},
1962 	{"i915_lpsp_status", i915_lpsp_status, 0},
1963 };
1964 
1965 static const struct {
1966 	const char *name;
1967 	const struct file_operations *fops;
1968 } intel_display_debugfs_files[] = {
1969 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
1970 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
1971 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
1972 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
1973 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
1974 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
1975 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
1976 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
1977 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
1978 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
1979 	{"i915_ipc_status", &i915_ipc_status_fops},
1980 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
1981 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
1982 };
1983 
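/**
 * intel_display_debugfs_register - add display-specific debugfs files
 * @i915: i915 device instance
 *
 * Creates the writable control files listed in intel_display_debugfs_files
 * and the read-only info nodes listed in intel_display_debugfs_list under
 * the primary DRM minor's debugfs directory.
 */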
1984 void intel_display_debugfs_register(struct drm_i915_private *i915)
1985 {
1986 	struct drm_minor *minor = i915->drm.primary;
1987 	int i;
1988 
1989 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
1990 		debugfs_create_file(intel_display_debugfs_files[i].name,
1991 				    0644,
1992 				    minor->debugfs_root,
1993 				    to_i915(minor->dev),
1994 				    intel_display_debugfs_files[i].fops);
1995 	}
1996 
1997 	drm_debugfs_create_files(intel_display_debugfs_list,
1998 				 ARRAY_SIZE(intel_display_debugfs_list),
1999 				 minor->debugfs_root, minor);
2000 }
2001 
2002 static int i915_panel_show(struct seq_file *m, void *data)
2003 {
2004 	struct drm_connector *connector = m->private;
2005 	struct intel_dp *intel_dp =
2006 		intel_attached_dp(to_intel_connector(connector));
2007 
2008 	if (connector->status != connector_status_connected)
2009 		return -ENODEV;
2010 
2011 	seq_printf(m, "Panel power up delay: %d\n",
2012 		   intel_dp->panel_power_up_delay);
2013 	seq_printf(m, "Panel power down delay: %d\n",
2014 		   intel_dp->panel_power_down_delay);
2015 	seq_printf(m, "Backlight on delay: %d\n",
2016 		   intel_dp->backlight_on_delay);
2017 	seq_printf(m, "Backlight off delay: %d\n",
2018 		   intel_dp->backlight_off_delay);
2019 
2020 	return 0;
2021 }
2022 DEFINE_SHOW_ATTRIBUTE(i915_panel);
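/*
 * Registered per eDP connector as "i915_panel_timings", see
 * intel_connector_debugfs_add() below.
 */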
2023 
2024 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2025 {
2026 	struct drm_connector *connector = m->private;
2027 	struct intel_connector *intel_connector = to_intel_connector(connector);
2028 
2029 	if (connector->status != connector_status_connected)
2030 		return -ENODEV;
2031 
2032 	/* Bail out if HDCP is not supported by this connector */
2033 	if (!intel_connector->hdcp.shim)
2034 		return -EINVAL;
2035 
2036 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2037 		   connector->base.id);
2038 	intel_hdcp_info(m, intel_connector);
2039 
2040 	return 0;
2041 }
2042 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2043 
2044 #define LPSP_CAPABLE(COND) ((COND) ? seq_puts(m, "LPSP: capable\n") : \
2045 				seq_puts(m, "LPSP: incapable\n"))
2046 
2047 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2048 {
2049 	struct drm_connector *connector = m->private;
2050 	struct drm_i915_private *i915 = to_i915(connector->dev);
2051 	struct intel_encoder *encoder;
2052 
2053 	encoder = intel_attached_encoder(to_intel_connector(connector));
2054 	if (!encoder)
2055 		return -ENODEV;
2056 
2057 	if (connector->status != connector_status_connected)
2058 		return -ENODEV;
2059 
2060 	switch (INTEL_GEN(i915)) {
2061 	case 12:
2062 		/*
2063 		 * TGL can actually drive LPSP on ports up to DDI_C, but no TGL
2064 		 * SKU has DDI_C physically connected, and the driver does not
2065 		 * even initialize the DDI_C port on gen12.
2066 		 */
2067 		LPSP_CAPABLE(encoder->port <= PORT_B);
2068 		break;
2069 	case 11:
2070 		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2071 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2072 		break;
2073 	case 10:
2074 	case 9:
2075 		LPSP_CAPABLE(encoder->port == PORT_A &&
2076 			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2077 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
2078 			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2079 		break;
2080 	default:
2081 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2082 			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2083 	}
2084 
2085 	return 0;
2086 }
2087 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2088 
2089 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2090 {
2091 	struct drm_connector *connector = m->private;
2092 	struct drm_device *dev = connector->dev;
2093 	struct drm_crtc *crtc;
2094 	struct intel_dp *intel_dp;
2095 	struct drm_modeset_acquire_ctx ctx;
2096 	struct intel_crtc_state *crtc_state = NULL;
2097 	int ret = 0;
2098 	bool try_again = false;
2099 
2100 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2101 
2102 	do {
2103 		try_again = false;
2104 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2105 				       &ctx);
2106 		if (ret) {
2107 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2108 				try_again = true;
2109 				continue;
2110 			}
2111 			break;
2112 		}
2113 		crtc = connector->state->crtc;
2114 		if (connector->status != connector_status_connected || !crtc) {
2115 			ret = -ENODEV;
2116 			break;
2117 		}
2118 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2119 		if (ret == -EDEADLK) {
2120 			ret = drm_modeset_backoff(&ctx);
2121 			if (!ret) {
2122 				try_again = true;
2123 				continue;
2124 			}
2125 			break;
2126 		} else if (ret) {
2127 			break;
2128 		}
2129 		intel_dp = intel_attached_dp(to_intel_connector(connector));
2130 		crtc_state = to_intel_crtc_state(crtc->state);
2131 		seq_printf(m, "DSC_Enabled: %s\n",
2132 			   yesno(crtc_state->dsc.compression_enable));
2133 		seq_printf(m, "DSC_Sink_Support: %s\n",
2134 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2135 		seq_printf(m, "Force_DSC_Enable: %s\n",
2136 			   yesno(intel_dp->force_dsc_en));
2137 		if (!intel_dp_is_edp(intel_dp))
2138 			seq_printf(m, "FEC_Sink_Support: %s\n",
2139 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2140 	} while (try_again);
2141 
2142 	drm_modeset_drop_locks(&ctx);
2143 	drm_modeset_acquire_fini(&ctx);
2144 
2145 	return ret;
2146 }
2147 
2148 static ssize_t i915_dsc_fec_support_write(struct file *file,
2149 					  const char __user *ubuf,
2150 					  size_t len, loff_t *offp)
2151 {
2152 	bool dsc_enable = false;
2153 	int ret;
2154 	struct drm_connector *connector =
2155 		((struct seq_file *)file->private_data)->private;
2156 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2157 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2158 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2159 
2160 	if (len == 0)
2161 		return 0;
2162 
2163 	drm_dbg(&i915->drm,
2164 		"Copied %zu bytes from user to force DSC\n", len);
2165 
2166 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2167 	if (ret < 0)
2168 		return ret;
2169 
2170 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2171 		(dsc_enable) ? "true" : "false");
2172 	intel_dp->force_dsc_en = dsc_enable;
2173 
2174 	*offp += len;
2175 	return len;
2176 }
2177 
2178 static int i915_dsc_fec_support_open(struct inode *inode,
2179 				     struct file *file)
2180 {
2181 	return single_open(file, i915_dsc_fec_support_show,
2182 			   inode->i_private);
2183 }
2184 
2185 static const struct file_operations i915_dsc_fec_support_fops = {
2186 	.owner = THIS_MODULE,
2187 	.open = i915_dsc_fec_support_open,
2188 	.read = seq_read,
2189 	.llseek = seq_lseek,
2190 	.release = single_release,
2191 	.write = i915_dsc_fec_support_write
2192 };
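
/*
 * Illustrative usage (the per-connector directory name, e.g. "DP-1", is
 * system-dependent):
 *
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *
 * Reading reports the current DSC state and the sink's DSC/FEC capabilities;
 * writing a boolean sets intel_dp->force_dsc_en, which is honoured the next
 * time the connector's configuration is recomputed (e.g. on a full modeset).
 */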
2193 
2194 /**
2195  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2196  * @connector: pointer to a registered drm_connector
2197  *
2198  * Cleanup will be done by drm_connector_unregister() through a call to
2199  * drm_debugfs_connector_remove().
2200  *
2201  * Returns 0 on success, negative error codes on error.
2202  */
2203 int intel_connector_debugfs_add(struct drm_connector *connector)
2204 {
2205 	struct dentry *root = connector->debugfs_entry;
2206 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2207 
2208 	/* The connector must have been registered beforehand. */
2209 	if (!root)
2210 		return -ENODEV;
2211 
2212 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2213 		debugfs_create_file("i915_panel_timings", 0444, root,
2214 				    connector, &i915_panel_fops);
2215 		debugfs_create_file("i915_psr_sink_status", 0444, root,
2216 				    connector, &i915_psr_sink_status_fops);
2217 	}
2218 
2219 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2220 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2221 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2222 		debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
2223 				    connector, &i915_hdcp_sink_capability_fops);
2224 	}
2225 
2226 	if (INTEL_GEN(dev_priv) >= 10 &&
2227 	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
2228 	      !to_intel_connector(connector)->mst_port) ||
2229 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2230 		debugfs_create_file("i915_dsc_fec_support", 0444, root,
2231 				    connector, &i915_dsc_fec_support_fops);
2232 
2233 	/* Legacy panels don't support LPSP on any platform */
2234 	if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2235 	     IS_BROADWELL(dev_priv)) &&
2236 	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2237 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2238 	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2239 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2240 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2241 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2242 				    connector, &i915_lpsp_capability_fops);
2243 
2244 	return 0;
2245 }
2246