1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_display_types.h"
14 #include "intel_dp.h"
15 #include "intel_fbc.h"
16 #include "intel_hdcp.h"
17 #include "intel_hdmi.h"
18 #include "intel_pm.h"
19 #include "intel_psr.h"
20 #include "intel_sideband.h"
21 
22 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
23 {
24 	return to_i915(node->minor->dev);
25 }
26 
27 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
28 {
29 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
30 
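	/*
	 * busy_bits are frontbuffer slots with rendering still outstanding,
	 * flip_bits are slots with a page flip pending.
	 */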
31 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
32 		   dev_priv->fb_tracking.busy_bits);
33 
34 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
35 		   dev_priv->fb_tracking.flip_bits);
36 
37 	return 0;
38 }
39 
40 static int i915_fbc_status(struct seq_file *m, void *unused)
41 {
42 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 	struct intel_fbc *fbc = &dev_priv->fbc;
44 	intel_wakeref_t wakeref;
45 
46 	if (!HAS_FBC(dev_priv))
47 		return -ENODEV;
48 
49 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
50 	mutex_lock(&fbc->lock);
51 
52 	if (intel_fbc_is_active(dev_priv))
53 		seq_puts(m, "FBC enabled\n");
54 	else
55 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
56 
57 	if (intel_fbc_is_active(dev_priv)) {
58 		u32 mask;
59 
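		/*
		 * The compression status lives in a different register and
		 * bit mask depending on the platform generation; a non-zero
		 * value means the hardware reports compression activity.
		 */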
60 		if (INTEL_GEN(dev_priv) >= 8)
61 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
62 		else if (INTEL_GEN(dev_priv) >= 7)
63 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
64 		else if (INTEL_GEN(dev_priv) >= 5)
65 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
66 		else if (IS_G4X(dev_priv))
67 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
68 		else
69 			mask = intel_de_read(dev_priv, FBC_STATUS) &
70 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
71 
72 		seq_printf(m, "Compressing: %s\n", yesno(mask));
73 	}
74 
75 	mutex_unlock(&fbc->lock);
76 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
77 
78 	return 0;
79 }
80 
81 static int i915_fbc_false_color_get(void *data, u64 *val)
82 {
83 	struct drm_i915_private *dev_priv = data;
84 
85 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
86 		return -ENODEV;
87 
88 	*val = dev_priv->fbc.false_color;
89 
90 	return 0;
91 }
92 
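/*
 * FBC false color is a debug aid: with it enabled the hardware draws the
 * compressed portions of the frontbuffer in a distinct color, making it easy
 * to see what FBC is actually compressing.
 */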
93 static int i915_fbc_false_color_set(void *data, u64 val)
94 {
95 	struct drm_i915_private *dev_priv = data;
96 	u32 reg;
97 
98 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
99 		return -ENODEV;
100 
101 	mutex_lock(&dev_priv->fbc.lock);
102 
103 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
104 	dev_priv->fbc.false_color = val;
105 
106 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
107 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
108 
109 	mutex_unlock(&dev_priv->fbc.lock);
110 	return 0;
111 }
112 
113 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
114 			i915_fbc_false_color_get, i915_fbc_false_color_set,
115 			"%llu\n");
116 
117 static int i915_ips_status(struct seq_file *m, void *unused)
118 {
119 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
120 	intel_wakeref_t wakeref;
121 
122 	if (!HAS_IPS(dev_priv))
123 		return -ENODEV;
124 
125 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
126 
127 	seq_printf(m, "Enabled by kernel parameter: %s\n",
128 		   yesno(dev_priv->params.enable_ips));
129 
130 	if (INTEL_GEN(dev_priv) >= 8) {
131 		seq_puts(m, "Currently: unknown\n");
132 	} else {
133 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
134 			seq_puts(m, "Currently: enabled\n");
135 		else
136 			seq_puts(m, "Currently: disabled\n");
137 	}
138 
139 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
140 
141 	return 0;
142 }
143 
144 static int i915_sr_status(struct seq_file *m, void *unused)
145 {
146 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
147 	intel_wakeref_t wakeref;
148 	bool sr_enabled = false;
149 
150 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
151 
152 	if (INTEL_GEN(dev_priv) >= 9)
153 		/* no global SR status; inspect per-plane WM */;
154 	else if (HAS_PCH_SPLIT(dev_priv))
155 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
156 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
157 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
158 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
159 	else if (IS_I915GM(dev_priv))
160 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
161 	else if (IS_PINEVIEW(dev_priv))
162 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
163 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
164 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
165 
166 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
167 
168 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
169 
170 	return 0;
171 }
172 
173 static int i915_opregion(struct seq_file *m, void *unused)
174 {
175 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
176 
177 	if (opregion->header)
178 		seq_write(m, opregion->header, OPREGION_SIZE);
179 
180 	return 0;
181 }
182 
183 static int i915_vbt(struct seq_file *m, void *unused)
184 {
185 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
186 
187 	if (opregion->vbt)
188 		seq_write(m, opregion->vbt, opregion->vbt_size);
189 
190 	return 0;
191 }
192 
193 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
194 {
195 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
196 	struct drm_device *dev = &dev_priv->drm;
197 	struct intel_framebuffer *fbdev_fb = NULL;
198 	struct drm_framebuffer *drm_fb;
199 
200 #ifdef CONFIG_DRM_FBDEV_EMULATION
201 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
202 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
203 
204 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
205 			   fbdev_fb->base.width,
206 			   fbdev_fb->base.height,
207 			   fbdev_fb->base.format->depth,
208 			   fbdev_fb->base.format->cpp[0] * 8,
209 			   fbdev_fb->base.modifier,
210 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
211 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
212 		seq_putc(m, '\n');
213 	}
214 #endif
215 
216 	mutex_lock(&dev->mode_config.fb_lock);
217 	drm_for_each_fb(drm_fb, dev) {
218 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
219 		if (fb == fbdev_fb)
220 			continue;
221 
222 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
223 			   fb->base.width,
224 			   fb->base.height,
225 			   fb->base.format->depth,
226 			   fb->base.format->cpp[0] * 8,
227 			   fb->base.modifier,
228 			   drm_framebuffer_read_refcount(&fb->base));
229 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
230 		seq_putc(m, '\n');
231 	}
232 	mutex_unlock(&dev->mode_config.fb_lock);
233 
234 	return 0;
235 }
236 
237 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
238 {
239 	u8 val;
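	/* Human-readable decode of the sink state field of DP_PSR_STATUS */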
240 	static const char * const sink_status[] = {
241 		"inactive",
242 		"transition to active, capture and display",
243 		"active, display from RFB",
244 		"active, capture and display on sink device timings",
245 		"transition to inactive, capture and display, timing re-sync",
246 		"reserved",
247 		"reserved",
248 		"sink internal error",
249 	};
250 	struct drm_connector *connector = m->private;
251 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
252 	struct intel_dp *intel_dp =
253 		intel_attached_dp(to_intel_connector(connector));
254 	int ret;
255 
256 	if (!CAN_PSR(dev_priv)) {
257 		seq_puts(m, "PSR Unsupported\n");
258 		return -ENODEV;
259 	}
260 
261 	if (connector->status != connector_status_connected)
262 		return -ENODEV;
263 
264 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
265 
266 	if (ret == 1) {
267 		const char *str = "unknown";
268 
269 		val &= DP_PSR_SINK_STATE_MASK;
270 		if (val < ARRAY_SIZE(sink_status))
271 			str = sink_status[val];
272 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
273 	} else {
274 		return ret;
275 	}
276 
277 	return 0;
278 }
279 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
280 
281 static void
282 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
283 {
284 	u32 val, status_val;
285 	const char *status = "unknown";
286 
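	/*
	 * PSR1 and PSR2 report the source live state through different
	 * registers and with different state encodings, hence the two tables.
	 */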
287 	if (dev_priv->psr.psr2_enabled) {
288 		static const char * const live_status[] = {
289 			"IDLE",
290 			"CAPTURE",
291 			"CAPTURE_FS",
292 			"SLEEP",
293 			"BUFON_FW",
294 			"ML_UP",
295 			"SU_STANDBY",
296 			"FAST_SLEEP",
297 			"DEEP_SLEEP",
298 			"BUF_ON",
299 			"TG_ON"
300 		};
301 		val = intel_de_read(dev_priv,
302 				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
303 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
304 			      EDP_PSR2_STATUS_STATE_SHIFT;
305 		if (status_val < ARRAY_SIZE(live_status))
306 			status = live_status[status_val];
307 	} else {
308 		static const char * const live_status[] = {
309 			"IDLE",
310 			"SRDONACK",
311 			"SRDENT",
312 			"BUFOFF",
313 			"BUFON",
314 			"AUXACK",
315 			"SRDOFFACK",
316 			"SRDENT_ON",
317 		};
318 		val = intel_de_read(dev_priv,
319 				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
320 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
321 			      EDP_PSR_STATUS_STATE_SHIFT;
322 		if (status_val < ARRAY_SIZE(live_status))
323 			status = live_status[status_val];
324 	}
325 
326 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
327 }
328 
329 static int i915_edp_psr_status(struct seq_file *m, void *data)
330 {
331 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
332 	struct i915_psr *psr = &dev_priv->psr;
333 	intel_wakeref_t wakeref;
334 	const char *status;
335 	bool enabled;
336 	u32 val;
337 
338 	if (!HAS_PSR(dev_priv))
339 		return -ENODEV;
340 
341 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
342 	if (psr->dp)
343 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
344 	seq_puts(m, "\n");
345 
346 	if (!psr->sink_support)
347 		return 0;
348 
349 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
350 	mutex_lock(&psr->lock);
351 
352 	if (psr->enabled)
353 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
354 	else
355 		status = "disabled";
356 	seq_printf(m, "PSR mode: %s\n", status);
357 
358 	if (!psr->enabled) {
359 		seq_printf(m, "PSR sink not reliable: %s\n",
360 			   yesno(psr->sink_not_reliable));
361 
362 		goto unlock;
363 	}
364 
365 	if (psr->psr2_enabled) {
366 		val = intel_de_read(dev_priv,
367 				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
368 		enabled = val & EDP_PSR2_ENABLE;
369 	} else {
370 		val = intel_de_read(dev_priv,
371 				    EDP_PSR_CTL(dev_priv->psr.transcoder));
372 		enabled = val & EDP_PSR_ENABLE;
373 	}
374 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
375 		   enableddisabled(enabled), val);
376 	psr_source_status(dev_priv, m);
377 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
378 		   psr->busy_frontbuffer_bits);
379 
380 	/*
	 * SKL+ perf counter is reset to 0 every time a DC state is entered
382 	 */
383 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
384 		val = intel_de_read(dev_priv,
385 				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
386 		val &= EDP_PSR_PERF_CNT_MASK;
387 		seq_printf(m, "Performance counter: %u\n", val);
388 	}
389 
390 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
391 		seq_printf(m, "Last attempted entry at: %lld\n",
392 			   psr->last_entry_attempt);
393 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
394 	}
395 
396 	if (psr->psr2_enabled) {
397 		u32 su_frames_val[3];
398 		int frame;
399 
400 		/*
		 * Read all 3 registers beforehand to minimize the chance of
		 * crossing a frame boundary between register reads
403 		 */
404 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
405 			val = intel_de_read(dev_priv,
406 					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
407 			su_frames_val[frame / 3] = val;
408 		}
409 
410 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
411 
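		/*
		 * Each PSR2_SU_STATUS register packs the selective update
		 * block counts of three consecutive frames; unpack them here.
		 */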
412 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
413 			u32 su_blocks;
414 
415 			su_blocks = su_frames_val[frame / 3] &
416 				    PSR2_SU_STATUS_MASK(frame);
417 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
418 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
419 		}
420 
421 		seq_printf(m, "PSR2 selective fetch: %s\n",
422 			   enableddisabled(psr->psr2_sel_fetch_enabled));
423 	}
424 
425 unlock:
426 	mutex_unlock(&psr->lock);
427 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
428 
429 	return 0;
430 }
431 
432 static int
433 i915_edp_psr_debug_set(void *data, u64 val)
434 {
435 	struct drm_i915_private *dev_priv = data;
436 	intel_wakeref_t wakeref;
437 	int ret;
438 
439 	if (!CAN_PSR(dev_priv))
440 		return -ENODEV;
441 
442 	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
443 
444 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
445 
446 	ret = intel_psr_debug_set(dev_priv, val);
447 
448 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
449 
450 	return ret;
451 }
452 
453 static int
454 i915_edp_psr_debug_get(void *data, u64 *val)
455 {
456 	struct drm_i915_private *dev_priv = data;
457 
458 	if (!CAN_PSR(dev_priv))
459 		return -ENODEV;
460 
461 	*val = READ_ONCE(dev_priv->psr.debug);
462 	return 0;
463 }
464 
465 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
466 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
467 			"%llu\n");
468 
469 static int i915_power_domain_info(struct seq_file *m, void *unused)
470 {
471 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
472 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
473 	int i;
474 
475 	mutex_lock(&power_domains->lock);
476 
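	/*
	 * For each power well, dump its reference count followed by the use
	 * counts of every power domain it serves.
	 */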
477 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
478 	for (i = 0; i < power_domains->power_well_count; i++) {
479 		struct i915_power_well *power_well;
480 		enum intel_display_power_domain power_domain;
481 
482 		power_well = &power_domains->power_wells[i];
483 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
484 			   power_well->count);
485 
486 		for_each_power_domain(power_domain, power_well->desc->domains)
487 			seq_printf(m, "  %-23s %d\n",
488 				 intel_display_power_domain_str(power_domain),
489 				 power_domains->domain_use_count[power_domain]);
490 	}
491 
492 	mutex_unlock(&power_domains->lock);
493 
494 	return 0;
495 }
496 
497 static int i915_dmc_info(struct seq_file *m, void *unused)
498 {
499 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
500 	intel_wakeref_t wakeref;
501 	struct intel_csr *csr;
502 	i915_reg_t dc5_reg, dc6_reg = {};
503 
504 	if (!HAS_CSR(dev_priv))
505 		return -ENODEV;
506 
507 	csr = &dev_priv->csr;
508 
509 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
510 
511 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
512 	seq_printf(m, "path: %s\n", csr->fw_path);
513 
514 	if (!csr->dmc_payload)
515 		goto out;
516 
517 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
518 		   CSR_VERSION_MINOR(csr->version));
519 
520 	if (INTEL_GEN(dev_priv) >= 12) {
521 		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
522 		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
523 		/*
		 * NOTE: DMC_DEBUG3 is a general-purpose register.
		 * According to B.Specs:49196 the DMC firmware reuses the DC5/6
		 * counter register for DC3CO debugging and validation, but the
		 * TGL DMC firmware uses DMC_DEBUG3 as the DC3CO counter instead.
528 		 */
529 		seq_printf(m, "DC3CO count: %d\n",
530 			   intel_de_read(dev_priv, DMC_DEBUG3));
531 	} else {
532 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
533 						 SKL_CSR_DC3_DC5_COUNT;
534 		if (!IS_GEN9_LP(dev_priv))
535 			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
536 	}
537 
538 	seq_printf(m, "DC3 -> DC5 count: %d\n",
539 		   intel_de_read(dev_priv, dc5_reg));
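	/* dc6_reg stays zero-initialized on platforms without a DC6 counter */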
540 	if (dc6_reg.reg)
541 		seq_printf(m, "DC5 -> DC6 count: %d\n",
542 			   intel_de_read(dev_priv, dc6_reg));
543 
544 out:
545 	seq_printf(m, "program base: 0x%08x\n",
546 		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
547 	seq_printf(m, "ssp base: 0x%08x\n",
548 		   intel_de_read(dev_priv, CSR_SSP_BASE));
549 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
550 
551 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
552 
553 	return 0;
554 }
555 
556 static void intel_seq_print_mode(struct seq_file *m, int tabs,
557 				 const struct drm_display_mode *mode)
558 {
559 	int i;
560 
561 	for (i = 0; i < tabs; i++)
562 		seq_putc(m, '\t');
563 
564 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
565 }
566 
567 static void intel_encoder_info(struct seq_file *m,
568 			       struct intel_crtc *crtc,
569 			       struct intel_encoder *encoder)
570 {
571 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
572 	struct drm_connector_list_iter conn_iter;
573 	struct drm_connector *connector;
574 
575 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
576 		   encoder->base.base.id, encoder->base.name);
577 
578 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
579 	drm_for_each_connector_iter(connector, &conn_iter) {
580 		const struct drm_connector_state *conn_state =
581 			connector->state;
582 
583 		if (conn_state->best_encoder != &encoder->base)
584 			continue;
585 
586 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
587 			   connector->base.id, connector->name);
588 	}
589 	drm_connector_list_iter_end(&conn_iter);
590 }
591 
592 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
593 {
594 	const struct drm_display_mode *mode = panel->fixed_mode;
595 
596 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
597 }
598 
599 static void intel_hdcp_info(struct seq_file *m,
600 			    struct intel_connector *intel_connector)
601 {
602 	bool hdcp_cap, hdcp2_cap;
603 
604 	if (!intel_connector->hdcp.shim) {
605 		seq_puts(m, "No Connector Support");
606 		goto out;
607 	}
608 
609 	hdcp_cap = intel_hdcp_capable(intel_connector);
610 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
611 
612 	if (hdcp_cap)
613 		seq_puts(m, "HDCP1.4 ");
614 	if (hdcp2_cap)
615 		seq_puts(m, "HDCP2.2 ");
616 
617 	if (!hdcp_cap && !hdcp2_cap)
618 		seq_puts(m, "None");
619 
620 out:
621 	seq_puts(m, "\n");
622 }
623 
624 static void intel_dp_info(struct seq_file *m,
625 			  struct intel_connector *intel_connector)
626 {
627 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
628 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
629 	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
630 
631 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
632 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
633 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
634 		intel_panel_info(m, &intel_connector->panel);
635 
636 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
637 				edid ? edid->data : NULL, &intel_dp->aux);
638 }
639 
640 static void intel_dp_mst_info(struct seq_file *m,
641 			      struct intel_connector *intel_connector)
642 {
643 	bool has_audio = intel_connector->port->has_audio;
644 
645 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
646 }
647 
648 static void intel_hdmi_info(struct seq_file *m,
649 			    struct intel_connector *intel_connector)
650 {
651 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
652 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
653 
654 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
655 }
656 
657 static void intel_lvds_info(struct seq_file *m,
658 			    struct intel_connector *intel_connector)
659 {
660 	intel_panel_info(m, &intel_connector->panel);
661 }
662 
663 static void intel_connector_info(struct seq_file *m,
664 				 struct drm_connector *connector)
665 {
666 	struct intel_connector *intel_connector = to_intel_connector(connector);
667 	const struct drm_connector_state *conn_state = connector->state;
668 	struct intel_encoder *encoder =
669 		to_intel_encoder(conn_state->best_encoder);
670 	const struct drm_display_mode *mode;
671 
672 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
673 		   connector->base.id, connector->name,
674 		   drm_get_connector_status_name(connector->status));
675 
676 	if (connector->status == connector_status_disconnected)
677 		return;
678 
679 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
680 		   connector->display_info.width_mm,
681 		   connector->display_info.height_mm);
682 	seq_printf(m, "\tsubpixel order: %s\n",
683 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
684 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
685 
686 	if (!encoder)
687 		return;
688 
689 	switch (connector->connector_type) {
690 	case DRM_MODE_CONNECTOR_DisplayPort:
691 	case DRM_MODE_CONNECTOR_eDP:
692 		if (encoder->type == INTEL_OUTPUT_DP_MST)
693 			intel_dp_mst_info(m, intel_connector);
694 		else
695 			intel_dp_info(m, intel_connector);
696 		break;
697 	case DRM_MODE_CONNECTOR_LVDS:
698 		if (encoder->type == INTEL_OUTPUT_LVDS)
699 			intel_lvds_info(m, intel_connector);
700 		break;
701 	case DRM_MODE_CONNECTOR_HDMIA:
702 		if (encoder->type == INTEL_OUTPUT_HDMI ||
703 		    encoder->type == INTEL_OUTPUT_DDI)
704 			intel_hdmi_info(m, intel_connector);
705 		break;
706 	default:
707 		break;
708 	}
709 
710 	seq_puts(m, "\tHDCP version: ");
711 	intel_hdcp_info(m, intel_connector);
712 
	seq_puts(m, "\tmodes:\n");
714 	list_for_each_entry(mode, &connector->modes, head)
715 		intel_seq_print_mode(m, 2, mode);
716 }
717 
718 static const char *plane_type(enum drm_plane_type type)
719 {
720 	switch (type) {
721 	case DRM_PLANE_TYPE_OVERLAY:
722 		return "OVL";
723 	case DRM_PLANE_TYPE_PRIMARY:
724 		return "PRI";
725 	case DRM_PLANE_TYPE_CURSOR:
726 		return "CUR";
727 	/*
728 	 * Deliberately omitting default: to generate compiler warnings
729 	 * when a new drm_plane_type gets added.
730 	 */
731 	}
732 
733 	return "unknown";
734 }
735 
736 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
737 {
738 	/*
	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
	 * but print them all anyway so that misused values are easy to spot
741 	 */
742 	snprintf(buf, bufsize,
743 		 "%s%s%s%s%s%s(0x%08x)",
744 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
745 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
746 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
747 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
748 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
749 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
750 		 rotation);
751 }
752 
753 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
754 {
755 	const struct intel_plane_state *plane_state =
756 		to_intel_plane_state(plane->base.state);
757 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
758 	struct drm_format_name_buf format_name;
759 	struct drm_rect src, dst;
760 	char rot_str[48];
761 
762 	src = drm_plane_state_src(&plane_state->uapi);
763 	dst = drm_plane_state_dest(&plane_state->uapi);
764 
765 	if (fb)
766 		drm_get_format_name(fb->format->format, &format_name);
767 
768 	plane_rotation(rot_str, sizeof(rot_str),
769 		       plane_state->uapi.rotation);
770 
771 	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
772 		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
773 		   fb ? fb->width : 0, fb ? fb->height : 0,
774 		   DRM_RECT_FP_ARG(&src),
775 		   DRM_RECT_ARG(&dst),
776 		   rot_str);
777 }
778 
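/*
 * Same information as the uapi dump above, but taken from the hw state that
 * is actually programmed into the hardware.
 */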
779 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
780 {
781 	const struct intel_plane_state *plane_state =
782 		to_intel_plane_state(plane->base.state);
783 	const struct drm_framebuffer *fb = plane_state->hw.fb;
784 	struct drm_format_name_buf format_name;
785 	char rot_str[48];
786 
787 	if (!fb)
788 		return;
789 
790 	drm_get_format_name(fb->format->format, &format_name);
791 
792 	plane_rotation(rot_str, sizeof(rot_str),
793 		       plane_state->hw.rotation);
794 
795 	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
796 		   fb->base.id, format_name.str,
797 		   fb->width, fb->height,
798 		   yesno(plane_state->uapi.visible),
799 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
800 		   DRM_RECT_ARG(&plane_state->uapi.dst),
801 		   rot_str);
802 }
803 
804 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
805 {
806 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
807 	struct intel_plane *plane;
808 
809 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
810 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
811 			   plane->base.base.id, plane->base.name,
812 			   plane_type(plane->base.type));
813 		intel_plane_uapi_info(m, plane);
814 		intel_plane_hw_info(m, plane);
815 	}
816 }
817 
818 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
819 {
820 	const struct intel_crtc_state *crtc_state =
821 		to_intel_crtc_state(crtc->base.state);
822 	int num_scalers = crtc->num_scalers;
823 	int i;
824 
	/* Not all platforms have a scaler */
826 	if (num_scalers) {
827 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
828 			   num_scalers,
829 			   crtc_state->scaler_state.scaler_users,
830 			   crtc_state->scaler_state.scaler_id);
831 
832 		for (i = 0; i < num_scalers; i++) {
833 			const struct intel_scaler *sc =
834 				&crtc_state->scaler_state.scalers[i];
835 
836 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
837 				   i, yesno(sc->in_use), sc->mode);
838 		}
839 		seq_puts(m, "\n");
840 	} else {
841 		seq_puts(m, "\tNo scalers available on this platform\n");
842 	}
843 }
844 
845 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
846 {
847 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
848 	const struct intel_crtc_state *crtc_state =
849 		to_intel_crtc_state(crtc->base.state);
850 	struct intel_encoder *encoder;
851 
852 	seq_printf(m, "[CRTC:%d:%s]:\n",
853 		   crtc->base.base.id, crtc->base.name);
854 
855 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
856 		   yesno(crtc_state->uapi.enable),
857 		   yesno(crtc_state->uapi.active),
858 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
859 
860 	if (crtc_state->hw.enable) {
861 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
862 			   yesno(crtc_state->hw.active),
863 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
864 
865 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
866 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
867 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
868 
869 		intel_scaler_info(m, crtc);
870 	}
871 
872 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
873 				    crtc_state->uapi.encoder_mask)
874 		intel_encoder_info(m, crtc, encoder);
875 
876 	intel_plane_info(m, crtc);
877 
878 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
879 		   yesno(!crtc->cpu_fifo_underrun_disabled),
880 		   yesno(!crtc->pch_fifo_underrun_disabled));
881 }
882 
883 static int i915_display_info(struct seq_file *m, void *unused)
884 {
885 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
886 	struct drm_device *dev = &dev_priv->drm;
887 	struct intel_crtc *crtc;
888 	struct drm_connector *connector;
889 	struct drm_connector_list_iter conn_iter;
890 	intel_wakeref_t wakeref;
891 
892 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
893 
894 	drm_modeset_lock_all(dev);
895 
	seq_puts(m, "CRTC info\n");
	seq_puts(m, "---------\n");
898 	for_each_intel_crtc(dev, crtc)
899 		intel_crtc_info(m, crtc);
900 
	seq_putc(m, '\n');
	seq_puts(m, "Connector info\n");
	seq_puts(m, "--------------\n");
904 	drm_connector_list_iter_begin(dev, &conn_iter);
905 	drm_for_each_connector_iter(connector, &conn_iter)
906 		intel_connector_info(m, connector);
907 	drm_connector_list_iter_end(&conn_iter);
908 
909 	drm_modeset_unlock_all(dev);
910 
911 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
912 
913 	return 0;
914 }
915 
916 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
917 {
918 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
919 	struct drm_device *dev = &dev_priv->drm;
920 	int i;
921 
922 	drm_modeset_lock_all(dev);
923 
924 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
925 		   dev_priv->dpll.ref_clks.nssc,
926 		   dev_priv->dpll.ref_clks.ssc);
927 
928 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
929 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
930 
931 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
932 			   pll->info->id);
933 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
934 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
		seq_puts(m, " tracked hardware state:\n");
936 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
937 		seq_printf(m, " dpll_md: 0x%08x\n",
938 			   pll->state.hw_state.dpll_md);
939 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
940 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
941 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
942 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
943 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
944 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
945 			   pll->state.hw_state.mg_refclkin_ctl);
946 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
947 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
948 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
949 			   pll->state.hw_state.mg_clktop2_hsclkctl);
950 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
951 			   pll->state.hw_state.mg_pll_div0);
952 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
953 			   pll->state.hw_state.mg_pll_div1);
954 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
955 			   pll->state.hw_state.mg_pll_lf);
956 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
957 			   pll->state.hw_state.mg_pll_frac_lock);
958 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
959 			   pll->state.hw_state.mg_pll_ssc);
960 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
961 			   pll->state.hw_state.mg_pll_bias);
962 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
963 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
964 	}
965 	drm_modeset_unlock_all(dev);
966 
967 	return 0;
968 }
969 
970 static int i915_ipc_status_show(struct seq_file *m, void *data)
971 {
972 	struct drm_i915_private *dev_priv = m->private;
973 
974 	seq_printf(m, "Isochronous Priority Control: %s\n",
975 			yesno(dev_priv->ipc_enabled));
976 	return 0;
977 }
978 
979 static int i915_ipc_status_open(struct inode *inode, struct file *file)
980 {
981 	struct drm_i915_private *dev_priv = inode->i_private;
982 
983 	if (!HAS_IPC(dev_priv))
984 		return -ENODEV;
985 
986 	return single_open(file, i915_ipc_status_show, dev_priv);
987 }
988 
989 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
990 				     size_t len, loff_t *offp)
991 {
992 	struct seq_file *m = file->private_data;
993 	struct drm_i915_private *dev_priv = m->private;
994 	intel_wakeref_t wakeref;
995 	bool enable;
996 	int ret;
997 
998 	ret = kstrtobool_from_user(ubuf, len, &enable);
999 	if (ret < 0)
1000 		return ret;
1001 
1002 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1003 		if (!dev_priv->ipc_enabled && enable)
1004 			drm_info(&dev_priv->drm,
1005 				 "Enabling IPC: WM will be proper only after next commit\n");
1006 		dev_priv->wm.distrust_bios_wm = true;
1007 		dev_priv->ipc_enabled = enable;
1008 		intel_enable_ipc(dev_priv);
1009 	}
1010 
1011 	return len;
1012 }
1013 
1014 static const struct file_operations i915_ipc_status_fops = {
1015 	.owner = THIS_MODULE,
1016 	.open = i915_ipc_status_open,
1017 	.read = seq_read,
1018 	.llseek = seq_lseek,
1019 	.release = single_release,
1020 	.write = i915_ipc_status_write
1021 };
1022 
1023 static int i915_ddb_info(struct seq_file *m, void *unused)
1024 {
1025 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1026 	struct drm_device *dev = &dev_priv->drm;
1027 	struct skl_ddb_entry *entry;
1028 	struct intel_crtc *crtc;
1029 
1030 	if (INTEL_GEN(dev_priv) < 9)
1031 		return -ENODEV;
1032 
1033 	drm_modeset_lock_all(dev);
1034 
1035 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1036 
1037 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1038 		struct intel_crtc_state *crtc_state =
1039 			to_intel_crtc_state(crtc->base.state);
1040 		enum pipe pipe = crtc->pipe;
1041 		enum plane_id plane_id;
1042 
1043 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1044 
1045 		for_each_plane_id_on_crtc(crtc, plane_id) {
1046 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1047 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1048 				   entry->start, entry->end,
1049 				   skl_ddb_entry_size(entry));
1050 		}
1051 
1052 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1053 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1054 			   entry->end, skl_ddb_entry_size(entry));
1055 	}
1056 
1057 	drm_modeset_unlock_all(dev);
1058 
1059 	return 0;
1060 }
1061 
1062 static void drrs_status_per_crtc(struct seq_file *m,
1063 				 struct drm_device *dev,
1064 				 struct intel_crtc *intel_crtc)
1065 {
1066 	struct drm_i915_private *dev_priv = to_i915(dev);
1067 	struct i915_drrs *drrs = &dev_priv->drrs;
1068 	int vrefresh = 0;
1069 	struct drm_connector *connector;
1070 	struct drm_connector_list_iter conn_iter;
1071 
1072 	drm_connector_list_iter_begin(dev, &conn_iter);
1073 	drm_for_each_connector_iter(connector, &conn_iter) {
1074 		bool supported = false;
1075 
1076 		if (connector->state->crtc != &intel_crtc->base)
1077 			continue;
1078 
1079 		seq_printf(m, "%s:\n", connector->name);
1080 
1081 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
1082 		    drrs->type == SEAMLESS_DRRS_SUPPORT)
1083 			supported = true;
1084 
1085 		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
1086 	}
1087 	drm_connector_list_iter_end(&conn_iter);
1088 
1089 	seq_puts(m, "\n");
1090 
1091 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1092 		struct intel_panel *panel;
1093 
1094 		mutex_lock(&drrs->mutex);
1095 		/* DRRS Supported */
1096 		seq_puts(m, "\tDRRS Enabled: Yes\n");
1097 
1098 		/* disable_drrs() will make drrs->dp NULL */
1099 		if (!drrs->dp) {
1100 			seq_puts(m, "Idleness DRRS: Disabled\n");
1101 			if (dev_priv->psr.enabled)
1102 				seq_puts(m,
1103 				"\tAs PSR is enabled, DRRS is not enabled\n");
1104 			mutex_unlock(&drrs->mutex);
1105 			return;
1106 		}
1107 
1108 		panel = &drrs->dp->attached_connector->panel;
1109 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1110 					drrs->busy_frontbuffer_bits);
1111 
1112 		seq_puts(m, "\n\t\t");
1113 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1114 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1115 			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1116 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1117 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1118 			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1119 		} else {
1120 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1121 						drrs->refresh_rate_type);
1122 			mutex_unlock(&drrs->mutex);
1123 			return;
1124 		}
1125 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1126 
1127 		seq_puts(m, "\n\t\t");
1128 		mutex_unlock(&drrs->mutex);
1129 	} else {
		/* DRRS not supported. Print the VBT parameter */
		seq_puts(m, "\tDRRS Enabled: No");
1132 	}
1133 	seq_puts(m, "\n");
1134 }
1135 
1136 static int i915_drrs_status(struct seq_file *m, void *unused)
1137 {
1138 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1139 	struct drm_device *dev = &dev_priv->drm;
1140 	struct intel_crtc *intel_crtc;
1141 	int active_crtc_cnt = 0;
1142 
1143 	drm_modeset_lock_all(dev);
1144 	for_each_intel_crtc(dev, intel_crtc) {
1145 		if (intel_crtc->base.state->active) {
1146 			active_crtc_cnt++;
1147 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1148 
1149 			drrs_status_per_crtc(m, dev, intel_crtc);
1150 		}
1151 	}
1152 	drm_modeset_unlock_all(dev);
1153 
1154 	if (!active_crtc_cnt)
1155 		seq_puts(m, "No active crtc found\n");
1156 
1157 	return 0;
1158 }
1159 
#define LPSP_STATUS(COND) ((COND) ? seq_puts(m, "LPSP: enabled\n") : \
				seq_puts(m, "LPSP: disabled\n"))
1162 
1163 static bool
1164 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1165 			      enum i915_power_well_id power_well_id)
1166 {
1167 	intel_wakeref_t wakeref;
1168 	bool is_enabled;
1169 
1170 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1171 	is_enabled = intel_display_power_well_is_enabled(i915,
1172 							 power_well_id);
1173 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1174 
1175 	return is_enabled;
1176 }
1177 
1178 static int i915_lpsp_status(struct seq_file *m, void *unused)
1179 {
1180 	struct drm_i915_private *i915 = node_to_i915(m->private);
1181 
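	/*
	 * LPSP is only possible while the platform's main display power well
	 * is powered down, so report it as enabled when that well is off.
	 */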
1182 	switch (INTEL_GEN(i915)) {
1183 	case 12:
1184 	case 11:
1185 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1186 		break;
1187 	case 10:
1188 	case 9:
1189 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1190 		break;
1191 	default:
1192 		/*
		 * Apart from HASWELL/BROADWELL, no other legacy platform
		 * supports LPSP.
1195 		 */
1196 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1197 			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1198 		else
1199 			seq_puts(m, "LPSP: not supported\n");
1200 	}
1201 
1202 	return 0;
1203 }
1204 
1205 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1206 {
1207 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1208 	struct drm_device *dev = &dev_priv->drm;
1209 	struct intel_encoder *intel_encoder;
1210 	struct intel_digital_port *dig_port;
1211 	struct drm_connector *connector;
1212 	struct drm_connector_list_iter conn_iter;
1213 
1214 	drm_connector_list_iter_begin(dev, &conn_iter);
1215 	drm_for_each_connector_iter(connector, &conn_iter) {
1216 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1217 			continue;
1218 
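		/*
		 * Skip the connectors created for MST streams; the topology
		 * is dumped below from the digital port that owns the MST
		 * manager.
		 */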
1219 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1220 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1221 			continue;
1222 
1223 		dig_port = enc_to_dig_port(intel_encoder);
1224 		if (!dig_port->dp.can_mst)
1225 			continue;
1226 
1227 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1228 			   dig_port->base.base.base.id,
1229 			   dig_port->base.base.name);
1230 		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1231 	}
1232 	drm_connector_list_iter_end(&conn_iter);
1233 
1234 	return 0;
1235 }
1236 
1237 static ssize_t i915_displayport_test_active_write(struct file *file,
1238 						  const char __user *ubuf,
1239 						  size_t len, loff_t *offp)
1240 {
1241 	char *input_buffer;
1242 	int status = 0;
1243 	struct drm_device *dev;
1244 	struct drm_connector *connector;
1245 	struct drm_connector_list_iter conn_iter;
1246 	struct intel_dp *intel_dp;
1247 	int val = 0;
1248 
1249 	dev = ((struct seq_file *)file->private_data)->private;
1250 
1251 	if (len == 0)
1252 		return 0;
1253 
1254 	input_buffer = memdup_user_nul(ubuf, len);
1255 	if (IS_ERR(input_buffer))
1256 		return PTR_ERR(input_buffer);
1257 
1258 	drm_dbg(&to_i915(dev)->drm,
1259 		"Copied %d bytes from user\n", (unsigned int)len);
1260 
1261 	drm_connector_list_iter_begin(dev, &conn_iter);
1262 	drm_for_each_connector_iter(connector, &conn_iter) {
1263 		struct intel_encoder *encoder;
1264 
1265 		if (connector->connector_type !=
1266 		    DRM_MODE_CONNECTOR_DisplayPort)
1267 			continue;
1268 
1269 		encoder = to_intel_encoder(connector->encoder);
1270 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1271 			continue;
1272 
1273 		if (encoder && connector->status == connector_status_connected) {
1274 			intel_dp = enc_to_intel_dp(encoder);
1275 			status = kstrtoint(input_buffer, 10, &val);
1276 			if (status < 0)
1277 				break;
1278 			drm_dbg(&to_i915(dev)->drm,
1279 				"Got %d for test active\n", val);
1280 			/* To prevent erroneous activation of the compliance
1281 			 * testing code, only accept an actual value of 1 here
1282 			 */
1283 			if (val == 1)
1284 				intel_dp->compliance.test_active = true;
1285 			else
1286 				intel_dp->compliance.test_active = false;
1287 		}
1288 	}
1289 	drm_connector_list_iter_end(&conn_iter);
1290 	kfree(input_buffer);
1291 	if (status < 0)
1292 		return status;
1293 
1294 	*offp += len;
1295 	return len;
1296 }
1297 
1298 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1299 {
1300 	struct drm_i915_private *dev_priv = m->private;
1301 	struct drm_device *dev = &dev_priv->drm;
1302 	struct drm_connector *connector;
1303 	struct drm_connector_list_iter conn_iter;
1304 	struct intel_dp *intel_dp;
1305 
1306 	drm_connector_list_iter_begin(dev, &conn_iter);
1307 	drm_for_each_connector_iter(connector, &conn_iter) {
1308 		struct intel_encoder *encoder;
1309 
1310 		if (connector->connector_type !=
1311 		    DRM_MODE_CONNECTOR_DisplayPort)
1312 			continue;
1313 
1314 		encoder = to_intel_encoder(connector->encoder);
1315 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1316 			continue;
1317 
1318 		if (encoder && connector->status == connector_status_connected) {
1319 			intel_dp = enc_to_intel_dp(encoder);
1320 			if (intel_dp->compliance.test_active)
1321 				seq_puts(m, "1");
1322 			else
1323 				seq_puts(m, "0");
1324 		} else
1325 			seq_puts(m, "0");
1326 	}
1327 	drm_connector_list_iter_end(&conn_iter);
1328 
1329 	return 0;
1330 }
1331 
1332 static int i915_displayport_test_active_open(struct inode *inode,
1333 					     struct file *file)
1334 {
1335 	return single_open(file, i915_displayport_test_active_show,
1336 			   inode->i_private);
1337 }
1338 
1339 static const struct file_operations i915_displayport_test_active_fops = {
1340 	.owner = THIS_MODULE,
1341 	.open = i915_displayport_test_active_open,
1342 	.read = seq_read,
1343 	.llseek = seq_lseek,
1344 	.release = single_release,
1345 	.write = i915_displayport_test_active_write
1346 };
1347 
1348 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1349 {
1350 	struct drm_i915_private *dev_priv = m->private;
1351 	struct drm_device *dev = &dev_priv->drm;
1352 	struct drm_connector *connector;
1353 	struct drm_connector_list_iter conn_iter;
1354 	struct intel_dp *intel_dp;
1355 
1356 	drm_connector_list_iter_begin(dev, &conn_iter);
1357 	drm_for_each_connector_iter(connector, &conn_iter) {
1358 		struct intel_encoder *encoder;
1359 
1360 		if (connector->connector_type !=
1361 		    DRM_MODE_CONNECTOR_DisplayPort)
1362 			continue;
1363 
1364 		encoder = to_intel_encoder(connector->encoder);
1365 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1366 			continue;
1367 
1368 		if (encoder && connector->status == connector_status_connected) {
1369 			intel_dp = enc_to_intel_dp(encoder);
1370 			if (intel_dp->compliance.test_type ==
1371 			    DP_TEST_LINK_EDID_READ)
1372 				seq_printf(m, "%lx",
1373 					   intel_dp->compliance.test_data.edid);
1374 			else if (intel_dp->compliance.test_type ==
1375 				 DP_TEST_LINK_VIDEO_PATTERN) {
1376 				seq_printf(m, "hdisplay: %d\n",
1377 					   intel_dp->compliance.test_data.hdisplay);
1378 				seq_printf(m, "vdisplay: %d\n",
1379 					   intel_dp->compliance.test_data.vdisplay);
1380 				seq_printf(m, "bpc: %u\n",
1381 					   intel_dp->compliance.test_data.bpc);
1382 			} else if (intel_dp->compliance.test_type ==
1383 				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1384 				seq_printf(m, "pattern: %d\n",
1385 					   intel_dp->compliance.test_data.phytest.phy_pattern);
1386 				seq_printf(m, "Number of lanes: %d\n",
1387 					   intel_dp->compliance.test_data.phytest.num_lanes);
1388 				seq_printf(m, "Link Rate: %d\n",
1389 					   intel_dp->compliance.test_data.phytest.link_rate);
1390 				seq_printf(m, "level: %02x\n",
1391 					   intel_dp->train_set[0]);
1392 			}
1393 		} else
1394 			seq_puts(m, "0");
1395 	}
1396 	drm_connector_list_iter_end(&conn_iter);
1397 
1398 	return 0;
1399 }
1400 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1401 
1402 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1403 {
1404 	struct drm_i915_private *dev_priv = m->private;
1405 	struct drm_device *dev = &dev_priv->drm;
1406 	struct drm_connector *connector;
1407 	struct drm_connector_list_iter conn_iter;
1408 	struct intel_dp *intel_dp;
1409 
1410 	drm_connector_list_iter_begin(dev, &conn_iter);
1411 	drm_for_each_connector_iter(connector, &conn_iter) {
1412 		struct intel_encoder *encoder;
1413 
1414 		if (connector->connector_type !=
1415 		    DRM_MODE_CONNECTOR_DisplayPort)
1416 			continue;
1417 
1418 		encoder = to_intel_encoder(connector->encoder);
1419 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1420 			continue;
1421 
1422 		if (encoder && connector->status == connector_status_connected) {
1423 			intel_dp = enc_to_intel_dp(encoder);
1424 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1425 		} else
1426 			seq_puts(m, "0");
1427 	}
1428 	drm_connector_list_iter_end(&conn_iter);
1429 
1430 	return 0;
1431 }
1432 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1433 
1434 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1435 {
1436 	struct drm_i915_private *dev_priv = m->private;
1437 	struct drm_device *dev = &dev_priv->drm;
1438 	int level;
1439 	int num_levels;
1440 
1441 	if (IS_CHERRYVIEW(dev_priv))
1442 		num_levels = 3;
1443 	else if (IS_VALLEYVIEW(dev_priv))
1444 		num_levels = 1;
1445 	else if (IS_G4X(dev_priv))
1446 		num_levels = 3;
1447 	else
1448 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1449 
1450 	drm_modeset_lock_all(dev);
1451 
1452 	for (level = 0; level < num_levels; level++) {
1453 		unsigned int latency = wm[level];
1454 
1455 		/*
1456 		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9+/vlv/chv/g4x
1458 		 */
1459 		if (INTEL_GEN(dev_priv) >= 9 ||
1460 		    IS_VALLEYVIEW(dev_priv) ||
1461 		    IS_CHERRYVIEW(dev_priv) ||
1462 		    IS_G4X(dev_priv))
1463 			latency *= 10;
1464 		else if (level > 0)
1465 			latency *= 5;
1466 
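		/* latency is now in 0.1us units for all levels and platforms */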
1467 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1468 			   level, wm[level], latency / 10, latency % 10);
1469 	}
1470 
1471 	drm_modeset_unlock_all(dev);
1472 }
1473 
1474 static int pri_wm_latency_show(struct seq_file *m, void *data)
1475 {
1476 	struct drm_i915_private *dev_priv = m->private;
1477 	const u16 *latencies;
1478 
1479 	if (INTEL_GEN(dev_priv) >= 9)
1480 		latencies = dev_priv->wm.skl_latency;
1481 	else
1482 		latencies = dev_priv->wm.pri_latency;
1483 
1484 	wm_latency_show(m, latencies);
1485 
1486 	return 0;
1487 }
1488 
1489 static int spr_wm_latency_show(struct seq_file *m, void *data)
1490 {
1491 	struct drm_i915_private *dev_priv = m->private;
1492 	const u16 *latencies;
1493 
1494 	if (INTEL_GEN(dev_priv) >= 9)
1495 		latencies = dev_priv->wm.skl_latency;
1496 	else
1497 		latencies = dev_priv->wm.spr_latency;
1498 
1499 	wm_latency_show(m, latencies);
1500 
1501 	return 0;
1502 }
1503 
1504 static int cur_wm_latency_show(struct seq_file *m, void *data)
1505 {
1506 	struct drm_i915_private *dev_priv = m->private;
1507 	const u16 *latencies;
1508 
1509 	if (INTEL_GEN(dev_priv) >= 9)
1510 		latencies = dev_priv->wm.skl_latency;
1511 	else
1512 		latencies = dev_priv->wm.cur_latency;
1513 
1514 	wm_latency_show(m, latencies);
1515 
1516 	return 0;
1517 }
1518 
1519 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1520 {
1521 	struct drm_i915_private *dev_priv = inode->i_private;
1522 
1523 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
1524 		return -ENODEV;
1525 
1526 	return single_open(file, pri_wm_latency_show, dev_priv);
1527 }
1528 
1529 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1530 {
1531 	struct drm_i915_private *dev_priv = inode->i_private;
1532 
1533 	if (HAS_GMCH(dev_priv))
1534 		return -ENODEV;
1535 
1536 	return single_open(file, spr_wm_latency_show, dev_priv);
1537 }
1538 
1539 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1540 {
1541 	struct drm_i915_private *dev_priv = inode->i_private;
1542 
1543 	if (HAS_GMCH(dev_priv))
1544 		return -ENODEV;
1545 
1546 	return single_open(file, cur_wm_latency_show, dev_priv);
1547 }
1548 
1549 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1550 				size_t len, loff_t *offp, u16 wm[8])
1551 {
1552 	struct seq_file *m = file->private_data;
1553 	struct drm_i915_private *dev_priv = m->private;
1554 	struct drm_device *dev = &dev_priv->drm;
1555 	u16 new[8] = { 0 };
1556 	int num_levels;
1557 	int level;
1558 	int ret;
1559 	char tmp[32];
1560 
1561 	if (IS_CHERRYVIEW(dev_priv))
1562 		num_levels = 3;
1563 	else if (IS_VALLEYVIEW(dev_priv))
1564 		num_levels = 1;
1565 	else if (IS_G4X(dev_priv))
1566 		num_levels = 3;
1567 	else
1568 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1569 
1570 	if (len >= sizeof(tmp))
1571 		return -EINVAL;
1572 
1573 	if (copy_from_user(tmp, ubuf, len))
1574 		return -EFAULT;
1575 
1576 	tmp[len] = '\0';
1577 
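	/* Require exactly one latency value per watermark level */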
1578 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1579 		     &new[0], &new[1], &new[2], &new[3],
1580 		     &new[4], &new[5], &new[6], &new[7]);
1581 	if (ret != num_levels)
1582 		return -EINVAL;
1583 
1584 	drm_modeset_lock_all(dev);
1585 
1586 	for (level = 0; level < num_levels; level++)
1587 		wm[level] = new[level];
1588 
1589 	drm_modeset_unlock_all(dev);
1590 
1591 	return len;
1592 }
1593 
1595 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1596 				    size_t len, loff_t *offp)
1597 {
1598 	struct seq_file *m = file->private_data;
1599 	struct drm_i915_private *dev_priv = m->private;
1600 	u16 *latencies;
1601 
1602 	if (INTEL_GEN(dev_priv) >= 9)
1603 		latencies = dev_priv->wm.skl_latency;
1604 	else
1605 		latencies = dev_priv->wm.pri_latency;
1606 
1607 	return wm_latency_write(file, ubuf, len, offp, latencies);
1608 }
1609 
1610 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1611 				    size_t len, loff_t *offp)
1612 {
1613 	struct seq_file *m = file->private_data;
1614 	struct drm_i915_private *dev_priv = m->private;
1615 	u16 *latencies;
1616 
1617 	if (INTEL_GEN(dev_priv) >= 9)
1618 		latencies = dev_priv->wm.skl_latency;
1619 	else
1620 		latencies = dev_priv->wm.spr_latency;
1621 
1622 	return wm_latency_write(file, ubuf, len, offp, latencies);
1623 }
1624 
1625 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1626 				    size_t len, loff_t *offp)
1627 {
1628 	struct seq_file *m = file->private_data;
1629 	struct drm_i915_private *dev_priv = m->private;
1630 	u16 *latencies;
1631 
1632 	if (INTEL_GEN(dev_priv) >= 9)
1633 		latencies = dev_priv->wm.skl_latency;
1634 	else
1635 		latencies = dev_priv->wm.cur_latency;
1636 
1637 	return wm_latency_write(file, ubuf, len, offp, latencies);
1638 }
1639 
1640 static const struct file_operations i915_pri_wm_latency_fops = {
1641 	.owner = THIS_MODULE,
1642 	.open = pri_wm_latency_open,
1643 	.read = seq_read,
1644 	.llseek = seq_lseek,
1645 	.release = single_release,
1646 	.write = pri_wm_latency_write
1647 };
1648 
1649 static const struct file_operations i915_spr_wm_latency_fops = {
1650 	.owner = THIS_MODULE,
1651 	.open = spr_wm_latency_open,
1652 	.read = seq_read,
1653 	.llseek = seq_lseek,
1654 	.release = single_release,
1655 	.write = spr_wm_latency_write
1656 };
1657 
1658 static const struct file_operations i915_cur_wm_latency_fops = {
1659 	.owner = THIS_MODULE,
1660 	.open = cur_wm_latency_open,
1661 	.read = seq_read,
1662 	.llseek = seq_lseek,
1663 	.release = single_release,
1664 	.write = cur_wm_latency_write
1665 };
1666 
1667 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1668 {
1669 	struct drm_i915_private *dev_priv = m->private;
1670 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1671 
1672 	/* Synchronize with everything first in case there's been an HPD
1673 	 * storm, but we haven't finished handling it in the kernel yet
1674 	 */
1675 	intel_synchronize_irq(dev_priv);
1676 	flush_work(&dev_priv->hotplug.dig_port_work);
1677 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1678 
1679 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1680 	seq_printf(m, "Detected: %s\n",
1681 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1682 
1683 	return 0;
1684 }
1685 
1686 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1687 					const char __user *ubuf, size_t len,
1688 					loff_t *offp)
1689 {
1690 	struct seq_file *m = file->private_data;
1691 	struct drm_i915_private *dev_priv = m->private;
1692 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1693 	unsigned int new_threshold;
1694 	int i;
1695 	char *newline;
1696 	char tmp[16];
1697 
1698 	if (len >= sizeof(tmp))
1699 		return -EINVAL;
1700 
1701 	if (copy_from_user(tmp, ubuf, len))
1702 		return -EFAULT;
1703 
1704 	tmp[len] = '\0';
1705 
1706 	/* Strip newline, if any */
1707 	newline = strchr(tmp, '\n');
1708 	if (newline)
1709 		*newline = '\0';
1710 
1711 	if (strcmp(tmp, "reset") == 0)
1712 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1713 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1714 		return -EINVAL;
1715 
1716 	if (new_threshold > 0)
1717 		drm_dbg_kms(&dev_priv->drm,
1718 			    "Setting HPD storm detection threshold to %d\n",
1719 			    new_threshold);
1720 	else
1721 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1722 
1723 	spin_lock_irq(&dev_priv->irq_lock);
1724 	hotplug->hpd_storm_threshold = new_threshold;
1725 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1726 	for_each_hpd_pin(i)
1727 		hotplug->stats[i].count = 0;
1728 	spin_unlock_irq(&dev_priv->irq_lock);
1729 
1730 	/* Re-enable hpd immediately if we were in an irq storm */
1731 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1732 
1733 	return len;
1734 }
1735 
1736 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1737 {
1738 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1739 }
1740 
1741 static const struct file_operations i915_hpd_storm_ctl_fops = {
1742 	.owner = THIS_MODULE,
1743 	.open = i915_hpd_storm_ctl_open,
1744 	.read = seq_read,
1745 	.llseek = seq_lseek,
1746 	.release = single_release,
1747 	.write = i915_hpd_storm_ctl_write
1748 };
1749 
1750 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1751 {
1752 	struct drm_i915_private *dev_priv = m->private;
1753 
1754 	seq_printf(m, "Enabled: %s\n",
1755 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1756 
1757 	return 0;
1758 }
1759 
1760 static int
1761 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1762 {
1763 	return single_open(file, i915_hpd_short_storm_ctl_show,
1764 			   inode->i_private);
1765 }
1766 
1767 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1768 					      const char __user *ubuf,
1769 					      size_t len, loff_t *offp)
1770 {
1771 	struct seq_file *m = file->private_data;
1772 	struct drm_i915_private *dev_priv = m->private;
1773 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1774 	char *newline;
1775 	char tmp[16];
1776 	int i;
1777 	bool new_state;
1778 
1779 	if (len >= sizeof(tmp))
1780 		return -EINVAL;
1781 
1782 	if (copy_from_user(tmp, ubuf, len))
1783 		return -EFAULT;
1784 
1785 	tmp[len] = '\0';
1786 
1787 	/* Strip newline, if any */
1788 	newline = strchr(tmp, '\n');
1789 	if (newline)
1790 		*newline = '\0';
1791 
1792 	/* Reset to the "default" state for this system */
1793 	if (strcmp(tmp, "reset") == 0)
1794 		new_state = !HAS_DP_MST(dev_priv);
1795 	else if (kstrtobool(tmp, &new_state) != 0)
1796 		return -EINVAL;
1797 
1798 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1799 		    new_state ? "En" : "Dis");
1800 
1801 	spin_lock_irq(&dev_priv->irq_lock);
1802 	hotplug->hpd_short_storm_enabled = new_state;
1803 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1804 	for_each_hpd_pin(i)
1805 		hotplug->stats[i].count = 0;
1806 	spin_unlock_irq(&dev_priv->irq_lock);
1807 
1808 	/* Re-enable hpd immediately if we were in an irq storm */
1809 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1810 
1811 	return len;
1812 }
1813 
1814 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1815 	.owner = THIS_MODULE,
1816 	.open = i915_hpd_short_storm_ctl_open,
1817 	.read = seq_read,
1818 	.llseek = seq_lseek,
1819 	.release = single_release,
1820 	.write = i915_hpd_short_storm_ctl_write,
1821 };
1822 
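/*
 * Debugfs override for DRRS: a non-zero value manually enables eDP DRRS on
 * every active, DRRS-capable pipe, zero disables it again. Each CRTC is
 * handled under its own modeset lock, waiting for any pending commit to
 * complete first.
 */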
1823 static int i915_drrs_ctl_set(void *data, u64 val)
1824 {
1825 	struct drm_i915_private *dev_priv = data;
1826 	struct drm_device *dev = &dev_priv->drm;
1827 	struct intel_crtc *crtc;
1828 
1829 	if (INTEL_GEN(dev_priv) < 7)
1830 		return -ENODEV;
1831 
1832 	for_each_intel_crtc(dev, crtc) {
1833 		struct drm_connector_list_iter conn_iter;
1834 		struct intel_crtc_state *crtc_state;
1835 		struct drm_connector *connector;
1836 		struct drm_crtc_commit *commit;
1837 		int ret;
1838 
1839 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
1840 		if (ret)
1841 			return ret;
1842 
1843 		crtc_state = to_intel_crtc_state(crtc->base.state);
1844 
1845 		if (!crtc_state->hw.active ||
1846 		    !crtc_state->has_drrs)
1847 			goto out;
1848 
1849 		commit = crtc_state->uapi.commit;
1850 		if (commit) {
1851 			ret = wait_for_completion_interruptible(&commit->hw_done);
1852 			if (ret)
1853 				goto out;
1854 		}
1855 
1856 		drm_connector_list_iter_begin(dev, &conn_iter);
1857 		drm_for_each_connector_iter(connector, &conn_iter) {
1858 			struct intel_encoder *encoder;
1859 			struct intel_dp *intel_dp;
1860 
1861 			if (!(crtc_state->uapi.connector_mask &
1862 			      drm_connector_mask(connector)))
1863 				continue;
1864 
1865 			encoder = intel_attached_encoder(to_intel_connector(connector));
1866 			if (encoder->type != INTEL_OUTPUT_EDP)
1867 				continue;
1868 
1869 			drm_dbg(&dev_priv->drm,
1870 				"Manually %sabling DRRS. %llu\n",
1871 				val ? "en" : "dis", val);
1872 
1873 			intel_dp = enc_to_intel_dp(encoder);
1874 			if (val)
1875 				intel_edp_drrs_enable(intel_dp,
1876 						      crtc_state);
1877 			else
1878 				intel_edp_drrs_disable(intel_dp,
1879 						       crtc_state);
1880 		}
1881 		drm_connector_list_iter_end(&conn_iter);
1882 
1883 out:
1884 		drm_modeset_unlock(&crtc->base.mutex);
1885 		if (ret)
1886 			return ret;
1887 	}
1888 
1889 	return 0;
1890 }
1891 
1892 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
1893 
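/*
 * Writing a truthy value re-arms FIFO underrun reporting: for every active
 * pipe we wait for any pending commit (hw_done and flip_done) before calling
 * intel_crtc_arm_fifo_underrun(), and finish with an FBC underrun reset.
 */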
1894 static ssize_t
1895 i915_fifo_underrun_reset_write(struct file *filp,
1896 			       const char __user *ubuf,
1897 			       size_t cnt, loff_t *ppos)
1898 {
1899 	struct drm_i915_private *dev_priv = filp->private_data;
1900 	struct intel_crtc *intel_crtc;
1901 	struct drm_device *dev = &dev_priv->drm;
1902 	int ret;
1903 	bool reset;
1904 
1905 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
1906 	if (ret)
1907 		return ret;
1908 
1909 	if (!reset)
1910 		return cnt;
1911 
1912 	for_each_intel_crtc(dev, intel_crtc) {
1913 		struct drm_crtc_commit *commit;
1914 		struct intel_crtc_state *crtc_state;
1915 
1916 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
1917 		if (ret)
1918 			return ret;
1919 
1920 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
1921 		commit = crtc_state->uapi.commit;
1922 		if (commit) {
1923 			ret = wait_for_completion_interruptible(&commit->hw_done);
1924 			if (!ret)
1925 				ret = wait_for_completion_interruptible(&commit->flip_done);
1926 		}
1927 
1928 		if (!ret && crtc_state->hw.active) {
1929 			drm_dbg_kms(&dev_priv->drm,
1930 				    "Re-arming FIFO underruns on pipe %c\n",
1931 				    pipe_name(intel_crtc->pipe));
1932 
1933 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
1934 		}
1935 
1936 		drm_modeset_unlock(&intel_crtc->base.mutex);
1937 
1938 		if (ret)
1939 			return ret;
1940 	}
1941 
1942 	ret = intel_fbc_reset_underrun(dev_priv);
1943 	if (ret)
1944 		return ret;
1945 
1946 	return cnt;
1947 }
1948 
1949 static const struct file_operations i915_fifo_underrun_reset_ops = {
1950 	.owner = THIS_MODULE,
1951 	.open = simple_open,
1952 	.write = i915_fifo_underrun_reset_write,
1953 	.llseek = default_llseek,
1954 };
1955 
1956 static const struct drm_info_list intel_display_debugfs_list[] = {
1957 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
1958 	{"i915_fbc_status", i915_fbc_status, 0},
1959 	{"i915_ips_status", i915_ips_status, 0},
1960 	{"i915_sr_status", i915_sr_status, 0},
1961 	{"i915_opregion", i915_opregion, 0},
1962 	{"i915_vbt", i915_vbt, 0},
1963 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
1964 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
1965 	{"i915_power_domain_info", i915_power_domain_info, 0},
1966 	{"i915_dmc_info", i915_dmc_info, 0},
1967 	{"i915_display_info", i915_display_info, 0},
1968 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
1969 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
1970 	{"i915_ddb_info", i915_ddb_info, 0},
1971 	{"i915_drrs_status", i915_drrs_status, 0},
1972 	{"i915_lpsp_status", i915_lpsp_status, 0},
1973 };
1974 
1975 static const struct {
1976 	const char *name;
1977 	const struct file_operations *fops;
1978 } intel_display_debugfs_files[] = {
1979 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
1980 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
1981 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
1982 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
1983 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
1984 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
1985 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
1986 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
1987 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
1988 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
1989 	{"i915_ipc_status", &i915_ipc_status_fops},
1990 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
1991 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
1992 };
1993 
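/*
 * Register the display debugfs entries on the primary DRM minor: the
 * writable control files above plus the read-only seq_file entries from
 * intel_display_debugfs_list. They show up under the device's DRM debugfs
 * root (typically /sys/kernel/debug/dri/<minor>/).
 */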
1994 void intel_display_debugfs_register(struct drm_i915_private *i915)
1995 {
1996 	struct drm_minor *minor = i915->drm.primary;
1997 	int i;
1998 
1999 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
2000 		debugfs_create_file(intel_display_debugfs_files[i].name,
2001 				    S_IRUGO | S_IWUSR,
2002 				    minor->debugfs_root,
2003 				    to_i915(minor->dev),
2004 				    intel_display_debugfs_files[i].fops);
2005 	}
2006 
2007 	drm_debugfs_create_files(intel_display_debugfs_list,
2008 				 ARRAY_SIZE(intel_display_debugfs_list),
2009 				 minor->debugfs_root, minor);
2010 }
2011 
2012 static int i915_panel_show(struct seq_file *m, void *data)
2013 {
2014 	struct drm_connector *connector = m->private;
2015 	struct intel_dp *intel_dp =
2016 		intel_attached_dp(to_intel_connector(connector));
2017 
2018 	if (connector->status != connector_status_connected)
2019 		return -ENODEV;
2020 
2021 	seq_printf(m, "Panel power up delay: %d\n",
2022 		   intel_dp->panel_power_up_delay);
2023 	seq_printf(m, "Panel power down delay: %d\n",
2024 		   intel_dp->panel_power_down_delay);
2025 	seq_printf(m, "Backlight on delay: %d\n",
2026 		   intel_dp->backlight_on_delay);
2027 	seq_printf(m, "Backlight off delay: %d\n",
2028 		   intel_dp->backlight_off_delay);
2029 
2030 	return 0;
2031 }
2032 DEFINE_SHOW_ATTRIBUTE(i915_panel);
2033 
2034 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2035 {
2036 	struct drm_connector *connector = m->private;
2037 	struct intel_connector *intel_connector = to_intel_connector(connector);
2038 
2039 	if (connector->status != connector_status_connected)
2040 		return -ENODEV;
2041 
2042 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2043 		   connector->base.id);
2044 	intel_hdcp_info(m, intel_connector);
2045 
2046 	return 0;
2047 }
2048 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2049 
2050 #define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
2051 				seq_puts(m, "LPSP: incapable\n"))
2052 
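/*
 * Report whether the connector can be driven while staying in LPSP
 * (low-power single-pipe) mode; which outputs qualify is platform
 * dependent, as encoded in the switch below.
 */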
2053 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2054 {
2055 	struct drm_connector *connector = m->private;
2056 	struct drm_i915_private *i915 = to_i915(connector->dev);
2057 	struct intel_encoder *encoder;
2058 
2059 	encoder = intel_attached_encoder(to_intel_connector(connector));
2060 	if (!encoder)
2061 		return -ENODEV;
2062 
2063 	if (connector->status != connector_status_connected)
2064 		return -ENODEV;
2065 
2066 	switch (INTEL_GEN(i915)) {
2067 	case 12:
2068 		/*
2069 		 * TGL can actually drive LPSP on ports up to DDI_C, but no TGL
2070 		 * SKU has DDI_C physically connected, and the driver does not
2071 		 * even initialize the DDI_C port for gen12.
2072 		 */
2073 		LPSP_CAPABLE(encoder->port <= PORT_B);
2074 		break;
2075 	case 11:
2076 		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2077 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2078 		break;
2079 	case 10:
2080 	case 9:
2081 		LPSP_CAPABLE(encoder->port == PORT_A &&
2082 			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2083 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
2084 			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2085 		break;
2086 	default:
2087 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2088 			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2089 	}
2090 
2091 	return 0;
2092 }
2093 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2094 
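/*
 * The show handler takes connection_mutex and the CRTC lock using the usual
 * drm_modeset_acquire_ctx deadlock/backoff dance: on -EDEADLK we back off
 * and retry (try_again), any other error ends the loop.
 */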
2095 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2096 {
2097 	struct drm_connector *connector = m->private;
2098 	struct drm_device *dev = connector->dev;
2099 	struct drm_crtc *crtc;
2100 	struct intel_dp *intel_dp;
2101 	struct drm_modeset_acquire_ctx ctx;
2102 	struct intel_crtc_state *crtc_state = NULL;
2103 	int ret = 0;
2104 	bool try_again = false;
2105 
2106 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2107 
2108 	do {
2109 		try_again = false;
2110 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2111 				       &ctx);
2112 		if (ret) {
2113 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2114 				try_again = true;
2115 				continue;
2116 			}
2117 			break;
2118 		}
2119 		crtc = connector->state->crtc;
2120 		if (connector->status != connector_status_connected || !crtc) {
2121 			ret = -ENODEV;
2122 			break;
2123 		}
2124 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2125 		if (ret == -EDEADLK) {
2126 			ret = drm_modeset_backoff(&ctx);
2127 			if (!ret) {
2128 				try_again = true;
2129 				continue;
2130 			}
2131 			break;
2132 		} else if (ret) {
2133 			break;
2134 		}
2135 		intel_dp = intel_attached_dp(to_intel_connector(connector));
2136 		crtc_state = to_intel_crtc_state(crtc->state);
2137 		seq_printf(m, "DSC_Enabled: %s\n",
2138 			   yesno(crtc_state->dsc.compression_enable));
2139 		seq_printf(m, "DSC_Sink_Support: %s\n",
2140 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2141 		seq_printf(m, "Force_DSC_Enable: %s\n",
2142 			   yesno(intel_dp->force_dsc_en));
2143 		if (!intel_dp_is_edp(intel_dp))
2144 			seq_printf(m, "FEC_Sink_Support: %s\n",
2145 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2146 	} while (try_again);
2147 
2148 	drm_modeset_drop_locks(&ctx);
2149 	drm_modeset_acquire_fini(&ctx);
2150 
2151 	return ret;
2152 }
2153 
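/*
 * Writing a boolean here only latches intel_dp->force_dsc_en; it takes
 * effect the next time the connector's state is computed, so a subsequent
 * modeset is needed for the override to apply. Illustrative usage (the path
 * depends on the connector's debugfs directory):
 *
 *   echo 1 > i915_dsc_fec_support
 */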
2154 static ssize_t i915_dsc_fec_support_write(struct file *file,
2155 					  const char __user *ubuf,
2156 					  size_t len, loff_t *offp)
2157 {
2158 	bool dsc_enable = false;
2159 	int ret;
2160 	struct drm_connector *connector =
2161 		((struct seq_file *)file->private_data)->private;
2162 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2163 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2164 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2165 
2166 	if (len == 0)
2167 		return 0;
2168 
2169 	drm_dbg(&i915->drm,
2170 		"Copied %zu bytes from user to force DSC\n", len);
2171 
2172 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2173 	if (ret < 0)
2174 		return ret;
2175 
2176 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2177 		(dsc_enable) ? "true" : "false");
2178 	intel_dp->force_dsc_en = dsc_enable;
2179 
2180 	*offp += len;
2181 	return len;
2182 }
2183 
2184 static int i915_dsc_fec_support_open(struct inode *inode,
2185 				     struct file *file)
2186 {
2187 	return single_open(file, i915_dsc_fec_support_show,
2188 			   inode->i_private);
2189 }
2190 
2191 static const struct file_operations i915_dsc_fec_support_fops = {
2192 	.owner = THIS_MODULE,
2193 	.open = i915_dsc_fec_support_open,
2194 	.read = seq_read,
2195 	.llseek = seq_lseek,
2196 	.release = single_release,
2197 	.write = i915_dsc_fec_support_write,
2198 };
2199 
2200 /**
2201  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2202  * @connector: pointer to a registered drm_connector
2203  *
2204  * Cleanup will be done by drm_connector_unregister() through a call to
2205  * drm_debugfs_connector_remove().
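 *
 * This is expected to be called once the connector's debugfs directory
 * exists, typically from the connector's ->late_register hook.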
2206  *
2207  * Returns 0 on success, negative error codes on error.
2208  */
2209 int intel_connector_debugfs_add(struct drm_connector *connector)
2210 {
2211 	struct dentry *root = connector->debugfs_entry;
2212 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2213 
2214 	/* The connector must have been registered beforehand. */
2215 	if (!root)
2216 		return -ENODEV;
2217 
2218 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2219 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
2220 				    connector, &i915_panel_fops);
2221 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
2222 				    connector, &i915_psr_sink_status_fops);
2223 	}
2224 
2225 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2226 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2227 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2228 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
2229 				    connector, &i915_hdcp_sink_capability_fops);
2230 	}
2231 
2232 	if (INTEL_GEN(dev_priv) >= 10 &&
2233 	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
2234 	      !to_intel_connector(connector)->mst_port) ||
2235 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2236 		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
2237 				    connector, &i915_dsc_fec_support_fops);
2238 
2239 	/* Legacy panels don't support LPSP on any platform */
2240 	if ((INTEL_GEN(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2241 	     IS_BROADWELL(dev_priv)) &&
2242 	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2243 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2244 	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2245 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2246 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2247 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2248 				    connector, &i915_lpsp_capability_fops);
2249 
2250 	return 0;
2251 }
2252