1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_de.h"
14 #include "intel_display_types.h"
15 #include "intel_dp.h"
16 #include "intel_fbc.h"
17 #include "intel_hdcp.h"
18 #include "intel_hdmi.h"
19 #include "intel_pm.h"
20 #include "intel_psr.h"
21 #include "intel_sideband.h"
22 #include "intel_sprite.h"
23 
24 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
25 {
26 	return to_i915(node->minor->dev);
27 }
28 
29 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
30 {
31 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
32 
33 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
34 		   dev_priv->fb_tracking.busy_bits);
35 
36 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
37 		   dev_priv->fb_tracking.flip_bits);
38 
39 	return 0;
40 }
41 
42 static int i915_fbc_status(struct seq_file *m, void *unused)
43 {
44 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
45 	struct intel_fbc *fbc = &dev_priv->fbc;
46 	intel_wakeref_t wakeref;
47 
48 	if (!HAS_FBC(dev_priv))
49 		return -ENODEV;
50 
51 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
52 	mutex_lock(&fbc->lock);
53 
54 	if (intel_fbc_is_active(dev_priv))
55 		seq_puts(m, "FBC enabled\n");
56 	else
57 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
58 
59 	if (intel_fbc_is_active(dev_priv)) {
60 		u32 mask;
61 
62 		if (DISPLAY_VER(dev_priv) >= 8)
63 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
64 		else if (DISPLAY_VER(dev_priv) >= 7)
65 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
66 		else if (DISPLAY_VER(dev_priv) >= 5)
67 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
68 		else if (IS_G4X(dev_priv))
69 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
70 		else
71 			mask = intel_de_read(dev_priv, FBC_STATUS) &
72 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
73 
74 		seq_printf(m, "Compressing: %s\n", yesno(mask));
75 	}
76 
77 	mutex_unlock(&fbc->lock);
78 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
79 
80 	return 0;
81 }
82 
83 static int i915_fbc_false_color_get(void *data, u64 *val)
84 {
85 	struct drm_i915_private *dev_priv = data;
86 
87 	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
88 		return -ENODEV;
89 
90 	*val = dev_priv->fbc.false_color;
91 
92 	return 0;
93 }
94 
95 static int i915_fbc_false_color_set(void *data, u64 val)
96 {
97 	struct drm_i915_private *dev_priv = data;
98 	u32 reg;
99 
100 	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
101 		return -ENODEV;
102 
103 	mutex_lock(&dev_priv->fbc.lock);
104 
105 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
106 	dev_priv->fbc.false_color = val;
107 
108 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
109 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
110 
111 	mutex_unlock(&dev_priv->fbc.lock);
112 	return 0;
113 }
114 
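/*
 * Hook the get/set pair up as a simple debugfs attribute. Assuming the
 * usual debugfs layout and file name, the knob can then be toggled from
 * userspace with something like:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 */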
115 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
116 			i915_fbc_false_color_get, i915_fbc_false_color_set,
117 			"%llu\n");
118 
119 static int i915_ips_status(struct seq_file *m, void *unused)
120 {
121 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
122 	intel_wakeref_t wakeref;
123 
124 	if (!HAS_IPS(dev_priv))
125 		return -ENODEV;
126 
127 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
128 
129 	seq_printf(m, "Enabled by kernel parameter: %s\n",
130 		   yesno(dev_priv->params.enable_ips));
131 
132 	if (DISPLAY_VER(dev_priv) >= 8) {
133 		seq_puts(m, "Currently: unknown\n");
134 	} else {
135 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
136 			seq_puts(m, "Currently: enabled\n");
137 		else
138 			seq_puts(m, "Currently: disabled\n");
139 	}
140 
141 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
142 
143 	return 0;
144 }
145 
146 static int i915_sr_status(struct seq_file *m, void *unused)
147 {
148 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
149 	intel_wakeref_t wakeref;
150 	bool sr_enabled = false;
151 
152 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
153 
154 	if (DISPLAY_VER(dev_priv) >= 9)
155 		/* no global SR status; inspect per-plane WM */;
156 	else if (HAS_PCH_SPLIT(dev_priv))
157 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
158 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
159 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
160 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
161 	else if (IS_I915GM(dev_priv))
162 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
163 	else if (IS_PINEVIEW(dev_priv))
164 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
165 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
166 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
167 
168 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
169 
170 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
171 
172 	return 0;
173 }
174 
175 static int i915_opregion(struct seq_file *m, void *unused)
176 {
177 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
178 
179 	if (opregion->header)
180 		seq_write(m, opregion->header, OPREGION_SIZE);
181 
182 	return 0;
183 }
184 
185 static int i915_vbt(struct seq_file *m, void *unused)
186 {
187 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
188 
189 	if (opregion->vbt)
190 		seq_write(m, opregion->vbt, opregion->vbt_size);
191 
192 	return 0;
193 }
194 
195 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
196 {
197 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
198 	struct drm_device *dev = &dev_priv->drm;
199 	struct intel_framebuffer *fbdev_fb = NULL;
200 	struct drm_framebuffer *drm_fb;
201 
202 #ifdef CONFIG_DRM_FBDEV_EMULATION
203 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
204 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
205 
206 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
207 			   fbdev_fb->base.width,
208 			   fbdev_fb->base.height,
209 			   fbdev_fb->base.format->depth,
210 			   fbdev_fb->base.format->cpp[0] * 8,
211 			   fbdev_fb->base.modifier,
212 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
213 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
214 		seq_putc(m, '\n');
215 	}
216 #endif
217 
218 	mutex_lock(&dev->mode_config.fb_lock);
219 	drm_for_each_fb(drm_fb, dev) {
220 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
221 		if (fb == fbdev_fb)
222 			continue;
223 
224 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
225 			   fb->base.width,
226 			   fb->base.height,
227 			   fb->base.format->depth,
228 			   fb->base.format->cpp[0] * 8,
229 			   fb->base.modifier,
230 			   drm_framebuffer_read_refcount(&fb->base));
231 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
232 		seq_putc(m, '\n');
233 	}
234 	mutex_unlock(&dev->mode_config.fb_lock);
235 
236 	return 0;
237 }
238 
239 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
240 {
241 	u8 val;
242 	static const char * const sink_status[] = {
243 		"inactive",
244 		"transition to active, capture and display",
245 		"active, display from RFB",
246 		"active, capture and display on sink device timings",
247 		"transition to inactive, capture and display, timing re-sync",
248 		"reserved",
249 		"reserved",
250 		"sink internal error",
251 	};
252 	struct drm_connector *connector = m->private;
253 	struct intel_dp *intel_dp =
254 		intel_attached_dp(to_intel_connector(connector));
255 	int ret;
256 
257 	if (!CAN_PSR(intel_dp)) {
258 		seq_puts(m, "PSR Unsupported\n");
259 		return -ENODEV;
260 	}
261 
262 	if (connector->status != connector_status_connected)
263 		return -ENODEV;
264 
265 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
266 
267 	if (ret == 1) {
268 		const char *str = "unknown";
269 
270 		val &= DP_PSR_SINK_STATE_MASK;
271 		if (val < ARRAY_SIZE(sink_status))
272 			str = sink_status[val];
273 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
274 	} else {
275 		return ret;
276 	}
277 
278 	return 0;
279 }
280 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
281 
282 static void
283 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
284 {
285 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
286 	const char *status = "unknown";
287 	u32 val, status_val;
288 
289 	if (intel_dp->psr.psr2_enabled) {
290 		static const char * const live_status[] = {
291 			"IDLE",
292 			"CAPTURE",
293 			"CAPTURE_FS",
294 			"SLEEP",
295 			"BUFON_FW",
296 			"ML_UP",
297 			"SU_STANDBY",
298 			"FAST_SLEEP",
299 			"DEEP_SLEEP",
300 			"BUF_ON",
301 			"TG_ON"
302 		};
303 		val = intel_de_read(dev_priv,
304 				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
305 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
306 			      EDP_PSR2_STATUS_STATE_SHIFT;
307 		if (status_val < ARRAY_SIZE(live_status))
308 			status = live_status[status_val];
309 	} else {
310 		static const char * const live_status[] = {
311 			"IDLE",
312 			"SRDONACK",
313 			"SRDENT",
314 			"BUFOFF",
315 			"BUFON",
316 			"AUXACK",
317 			"SRDOFFACK",
318 			"SRDENT_ON",
319 		};
320 		val = intel_de_read(dev_priv,
321 				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
322 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
323 			      EDP_PSR_STATUS_STATE_SHIFT;
324 		if (status_val < ARRAY_SIZE(live_status))
325 			status = live_status[status_val];
326 	}
327 
328 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
329 }
330 
331 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
332 {
333 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
334 	struct intel_psr *psr = &intel_dp->psr;
335 	intel_wakeref_t wakeref;
336 	const char *status;
337 	bool enabled;
338 	u32 val;
339 
340 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
341 	if (psr->sink_support)
342 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
343 	seq_puts(m, "\n");
344 
345 	if (!psr->sink_support)
346 		return 0;
347 
348 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
349 	mutex_lock(&psr->lock);
350 
351 	if (psr->enabled)
352 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
353 	else
354 		status = "disabled";
355 	seq_printf(m, "PSR mode: %s\n", status);
356 
357 	if (!psr->enabled) {
358 		seq_printf(m, "PSR sink not reliable: %s\n",
359 			   yesno(psr->sink_not_reliable));
360 
361 		goto unlock;
362 	}
363 
364 	if (psr->psr2_enabled) {
365 		val = intel_de_read(dev_priv,
366 				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
367 		enabled = val & EDP_PSR2_ENABLE;
368 	} else {
369 		val = intel_de_read(dev_priv,
370 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
371 		enabled = val & EDP_PSR_ENABLE;
372 	}
373 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
374 		   enableddisabled(enabled), val);
375 	psr_source_status(intel_dp, m);
376 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
377 		   psr->busy_frontbuffer_bits);
378 
379 	/*
380 	 * SKL+ Perf counter is reset to 0 every time DC state is entered
381 	 */
382 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
383 		val = intel_de_read(dev_priv,
384 				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
385 		val &= EDP_PSR_PERF_CNT_MASK;
386 		seq_printf(m, "Performance counter: %u\n", val);
387 	}
388 
389 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
390 		seq_printf(m, "Last attempted entry at: %lld\n",
391 			   psr->last_entry_attempt);
392 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
393 	}
394 
395 	if (psr->psr2_enabled) {
396 		u32 su_frames_val[3];
397 		int frame;
398 
399 		/*
400 		 * Read all three registers beforehand to minimize the chance of
401 		 * crossing a frame boundary between the reads.
402 		 */
403 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
404 			val = intel_de_read(dev_priv,
405 					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
406 			su_frames_val[frame / 3] = val;
407 		}
408 
409 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
410 
411 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
412 			u32 su_blocks;
413 
414 			su_blocks = su_frames_val[frame / 3] &
415 				    PSR2_SU_STATUS_MASK(frame);
416 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
417 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
418 		}
419 
420 		seq_printf(m, "PSR2 selective fetch: %s\n",
421 			   enableddisabled(psr->psr2_sel_fetch_enabled));
422 	}
423 
424 unlock:
425 	mutex_unlock(&psr->lock);
426 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
427 
428 	return 0;
429 }
430 
431 static int i915_edp_psr_status(struct seq_file *m, void *data)
432 {
433 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
434 	struct intel_dp *intel_dp = NULL;
435 	struct intel_encoder *encoder;
436 
437 	if (!HAS_PSR(dev_priv))
438 		return -ENODEV;
439 
440 	/* Find the first eDP output that supports PSR */
441 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
442 		intel_dp = enc_to_intel_dp(encoder);
443 		break;
444 	}
445 
446 	if (!intel_dp)
447 		return -ENODEV;
448 
449 	return intel_psr_status(m, intel_dp);
450 }
451 
452 static int
453 i915_edp_psr_debug_set(void *data, u64 val)
454 {
455 	struct drm_i915_private *dev_priv = data;
456 	struct intel_encoder *encoder;
457 	intel_wakeref_t wakeref;
458 	int ret = -ENODEV;
459 
460 	if (!HAS_PSR(dev_priv))
461 		return ret;
462 
463 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
464 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
465 
466 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
467 
468 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
469 
470 		// TODO: split this into per-transcoder PSR debug state
471 		ret = intel_psr_debug_set(intel_dp, val);
472 
473 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
474 	}
475 
476 	return ret;
477 }
478 
479 static int
480 i915_edp_psr_debug_get(void *data, u64 *val)
481 {
482 	struct drm_i915_private *dev_priv = data;
483 	struct intel_encoder *encoder;
484 
485 	if (!HAS_PSR(dev_priv))
486 		return -ENODEV;
487 
488 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
489 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
490 
491 		// TODO: split this into per-transcoder PSR debug state
492 		*val = READ_ONCE(intel_dp->psr.debug);
493 		return 0;
494 	}
495 
496 	return -ENODEV;
497 }
498 
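/*
 * Expose the PSR debug value as a simple u64 debugfs attribute; the value
 * is a bitmask of I915_PSR_DEBUG_* flags (e.g. the I915_PSR_DEBUG_IRQ flag
 * checked in intel_psr_status() above) and is handed to
 * intel_psr_debug_set() for validation and application.
 */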
499 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
500 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
501 			"%llu\n");
502 
503 static int i915_power_domain_info(struct seq_file *m, void *unused)
504 {
505 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
506 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
507 	int i;
508 
509 	mutex_lock(&power_domains->lock);
510 
511 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
512 	for (i = 0; i < power_domains->power_well_count; i++) {
513 		struct i915_power_well *power_well;
514 		enum intel_display_power_domain power_domain;
515 
516 		power_well = &power_domains->power_wells[i];
517 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
518 			   power_well->count);
519 
520 		for_each_power_domain(power_domain, power_well->desc->domains)
521 			seq_printf(m, "  %-23s %d\n",
522 				 intel_display_power_domain_str(power_domain),
523 				 power_domains->domain_use_count[power_domain]);
524 	}
525 
526 	mutex_unlock(&power_domains->lock);
527 
528 	return 0;
529 }
530 
531 static int i915_dmc_info(struct seq_file *m, void *unused)
532 {
533 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
534 	intel_wakeref_t wakeref;
535 	struct intel_csr *csr;
536 	i915_reg_t dc5_reg, dc6_reg = {};
537 
538 	if (!HAS_CSR(dev_priv))
539 		return -ENODEV;
540 
541 	csr = &dev_priv->csr;
542 
543 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
544 
545 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
546 	seq_printf(m, "path: %s\n", csr->fw_path);
547 
548 	if (!csr->dmc_payload)
549 		goto out;
550 
551 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
552 		   CSR_VERSION_MINOR(csr->version));
553 
554 	if (DISPLAY_VER(dev_priv) >= 12) {
555 		if (IS_DGFX(dev_priv)) {
556 			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
557 		} else {
558 			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
559 			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
560 		}
561 
562 		/*
563 		 * NOTE: DMC_DEBUG3 is a general purpose reg.
564 		 * According to B.Specs:49196 the DMC f/w reuses the DC5/6 counter
565 		 * reg for DC3CO debugging and validation, but the TGL DMC f/w
566 		 * uses the DMC_DEBUG3 reg as the DC3CO counter.
567 		 */
568 		seq_printf(m, "DC3CO count: %d\n",
569 			   intel_de_read(dev_priv, DMC_DEBUG3));
570 	} else {
571 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
572 						 SKL_CSR_DC3_DC5_COUNT;
573 		if (!IS_GEMINILAKE(dev_priv) && !IS_BROXTON(dev_priv))
574 			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
575 	}
576 
577 	seq_printf(m, "DC3 -> DC5 count: %d\n",
578 		   intel_de_read(dev_priv, dc5_reg));
579 	if (dc6_reg.reg)
580 		seq_printf(m, "DC5 -> DC6 count: %d\n",
581 			   intel_de_read(dev_priv, dc6_reg));
582 
583 out:
584 	seq_printf(m, "program base: 0x%08x\n",
585 		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
586 	seq_printf(m, "ssp base: 0x%08x\n",
587 		   intel_de_read(dev_priv, CSR_SSP_BASE));
588 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
589 
590 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
591 
592 	return 0;
593 }
594 
595 static void intel_seq_print_mode(struct seq_file *m, int tabs,
596 				 const struct drm_display_mode *mode)
597 {
598 	int i;
599 
600 	for (i = 0; i < tabs; i++)
601 		seq_putc(m, '\t');
602 
603 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
604 }
605 
606 static void intel_encoder_info(struct seq_file *m,
607 			       struct intel_crtc *crtc,
608 			       struct intel_encoder *encoder)
609 {
610 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
611 	struct drm_connector_list_iter conn_iter;
612 	struct drm_connector *connector;
613 
614 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
615 		   encoder->base.base.id, encoder->base.name);
616 
617 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
618 	drm_for_each_connector_iter(connector, &conn_iter) {
619 		const struct drm_connector_state *conn_state =
620 			connector->state;
621 
622 		if (conn_state->best_encoder != &encoder->base)
623 			continue;
624 
625 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
626 			   connector->base.id, connector->name);
627 	}
628 	drm_connector_list_iter_end(&conn_iter);
629 }
630 
631 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
632 {
633 	const struct drm_display_mode *mode = panel->fixed_mode;
634 
635 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
636 }
637 
638 static void intel_hdcp_info(struct seq_file *m,
639 			    struct intel_connector *intel_connector)
640 {
641 	bool hdcp_cap, hdcp2_cap;
642 
643 	if (!intel_connector->hdcp.shim) {
644 		seq_puts(m, "No Connector Support");
645 		goto out;
646 	}
647 
648 	hdcp_cap = intel_hdcp_capable(intel_connector);
649 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
650 
651 	if (hdcp_cap)
652 		seq_puts(m, "HDCP1.4 ");
653 	if (hdcp2_cap)
654 		seq_puts(m, "HDCP2.2 ");
655 
656 	if (!hdcp_cap && !hdcp2_cap)
657 		seq_puts(m, "None");
658 
659 out:
660 	seq_puts(m, "\n");
661 }
662 
663 static void intel_dp_info(struct seq_file *m,
664 			  struct intel_connector *intel_connector)
665 {
666 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
667 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
668 	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
669 
670 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
671 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
672 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
673 		intel_panel_info(m, &intel_connector->panel);
674 
675 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
676 				edid ? edid->data : NULL, &intel_dp->aux);
677 }
678 
679 static void intel_dp_mst_info(struct seq_file *m,
680 			      struct intel_connector *intel_connector)
681 {
682 	bool has_audio = intel_connector->port->has_audio;
683 
684 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
685 }
686 
687 static void intel_hdmi_info(struct seq_file *m,
688 			    struct intel_connector *intel_connector)
689 {
690 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
691 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
692 
693 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
694 }
695 
696 static void intel_lvds_info(struct seq_file *m,
697 			    struct intel_connector *intel_connector)
698 {
699 	intel_panel_info(m, &intel_connector->panel);
700 }
701 
702 static void intel_connector_info(struct seq_file *m,
703 				 struct drm_connector *connector)
704 {
705 	struct intel_connector *intel_connector = to_intel_connector(connector);
706 	const struct drm_connector_state *conn_state = connector->state;
707 	struct intel_encoder *encoder =
708 		to_intel_encoder(conn_state->best_encoder);
709 	const struct drm_display_mode *mode;
710 
711 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
712 		   connector->base.id, connector->name,
713 		   drm_get_connector_status_name(connector->status));
714 
715 	if (connector->status == connector_status_disconnected)
716 		return;
717 
718 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
719 		   connector->display_info.width_mm,
720 		   connector->display_info.height_mm);
721 	seq_printf(m, "\tsubpixel order: %s\n",
722 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
723 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
724 
725 	if (!encoder)
726 		return;
727 
728 	switch (connector->connector_type) {
729 	case DRM_MODE_CONNECTOR_DisplayPort:
730 	case DRM_MODE_CONNECTOR_eDP:
731 		if (encoder->type == INTEL_OUTPUT_DP_MST)
732 			intel_dp_mst_info(m, intel_connector);
733 		else
734 			intel_dp_info(m, intel_connector);
735 		break;
736 	case DRM_MODE_CONNECTOR_LVDS:
737 		if (encoder->type == INTEL_OUTPUT_LVDS)
738 			intel_lvds_info(m, intel_connector);
739 		break;
740 	case DRM_MODE_CONNECTOR_HDMIA:
741 		if (encoder->type == INTEL_OUTPUT_HDMI ||
742 		    encoder->type == INTEL_OUTPUT_DDI)
743 			intel_hdmi_info(m, intel_connector);
744 		break;
745 	default:
746 		break;
747 	}
748 
749 	seq_puts(m, "\tHDCP version: ");
750 	intel_hdcp_info(m, intel_connector);
751 
752 	seq_puts(m, "\tmodes:\n");
753 	list_for_each_entry(mode, &connector->modes, head)
754 		intel_seq_print_mode(m, 2, mode);
755 }
756 
757 static const char *plane_type(enum drm_plane_type type)
758 {
759 	switch (type) {
760 	case DRM_PLANE_TYPE_OVERLAY:
761 		return "OVL";
762 	case DRM_PLANE_TYPE_PRIMARY:
763 		return "PRI";
764 	case DRM_PLANE_TYPE_CURSOR:
765 		return "CUR";
766 	/*
767 	 * Deliberately omitting default: to generate compiler warnings
768 	 * when a new drm_plane_type gets added.
769 	 */
770 	}
771 
772 	return "unknown";
773 }
774 
775 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
776 {
777 	/*
778 	 * According to the docs only one DRM_MODE_ROTATE_ bit is allowed, but
779 	 * print them all anyway to make any misuse of the values visible.
780 	 */
781 	snprintf(buf, bufsize,
782 		 "%s%s%s%s%s%s(0x%08x)",
783 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
784 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
785 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
786 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
787 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
788 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
789 		 rotation);
790 }
791 
792 static const char *plane_visibility(const struct intel_plane_state *plane_state)
793 {
794 	if (plane_state->uapi.visible)
795 		return "visible";
796 
797 	if (plane_state->planar_slave)
798 		return "planar-slave";
799 
800 	return "hidden";
801 }
802 
803 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
804 {
805 	const struct intel_plane_state *plane_state =
806 		to_intel_plane_state(plane->base.state);
807 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
808 	struct drm_rect src, dst;
809 	char rot_str[48];
810 
811 	src = drm_plane_state_src(&plane_state->uapi);
812 	dst = drm_plane_state_dest(&plane_state->uapi);
813 
814 	plane_rotation(rot_str, sizeof(rot_str),
815 		       plane_state->uapi.rotation);
816 
817 	seq_puts(m, "\t\tuapi: [FB:");
818 	if (fb)
819 		seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
820 			   &fb->format->format, fb->modifier, fb->width,
821 			   fb->height);
822 	else
823 		seq_puts(m, "0] n/a,0x0,0x0,");
824 	seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
825 		   ", rotation=%s\n", plane_visibility(plane_state),
826 		   DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);
827 
828 	if (plane_state->planar_linked_plane)
829 		seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
830 			   plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
831 			   plane_state->planar_slave ? "slave" : "master");
832 }
833 
834 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
835 {
836 	const struct intel_plane_state *plane_state =
837 		to_intel_plane_state(plane->base.state);
838 	const struct drm_framebuffer *fb = plane_state->hw.fb;
839 	char rot_str[48];
840 
841 	if (!fb)
842 		return;
843 
844 	plane_rotation(rot_str, sizeof(rot_str),
845 		       plane_state->hw.rotation);
846 
847 	seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
848 		   DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
849 		   fb->base.id, &fb->format->format,
850 		   fb->modifier, fb->width, fb->height,
851 		   yesno(plane_state->uapi.visible),
852 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
853 		   DRM_RECT_ARG(&plane_state->uapi.dst),
854 		   rot_str);
855 }
856 
857 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
858 {
859 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
860 	struct intel_plane *plane;
861 
862 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
863 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
864 			   plane->base.base.id, plane->base.name,
865 			   plane_type(plane->base.type));
866 		intel_plane_uapi_info(m, plane);
867 		intel_plane_hw_info(m, plane);
868 	}
869 }
870 
871 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
872 {
873 	const struct intel_crtc_state *crtc_state =
874 		to_intel_crtc_state(crtc->base.state);
875 	int num_scalers = crtc->num_scalers;
876 	int i;
877 
878 	/* Not all platforms have a scaler */
879 	if (num_scalers) {
880 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
881 			   num_scalers,
882 			   crtc_state->scaler_state.scaler_users,
883 			   crtc_state->scaler_state.scaler_id);
884 
885 		for (i = 0; i < num_scalers; i++) {
886 			const struct intel_scaler *sc =
887 				&crtc_state->scaler_state.scalers[i];
888 
889 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
890 				   i, yesno(sc->in_use), sc->mode);
891 		}
892 		seq_puts(m, "\n");
893 	} else {
894 		seq_puts(m, "\tNo scalers available on this platform\n");
895 	}
896 }
897 
898 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
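/*
 * crtc->debug.vbl.times[] is a histogram of vblank-evade update durations
 * in power-of-two nanosecond buckets; print one ASCII bar per bucket,
 * labelling every other row with its approximate bucket boundary.
 */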
899 static void crtc_updates_info(struct seq_file *m,
900 			      struct intel_crtc *crtc,
901 			      const char *hdr)
902 {
903 	u64 count;
904 	int row;
905 
906 	count = 0;
907 	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
908 		count += crtc->debug.vbl.times[row];
909 	seq_printf(m, "%sUpdates: %llu\n", hdr, count);
910 	if (!count)
911 		return;
912 
913 	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
914 		char columns[80] = "       |";
915 		unsigned int x;
916 
917 		if (row & 1) {
918 			const char *units;
919 
920 			if (row > 10) {
921 				x = 1000000;
922 				units = "ms";
923 			} else {
924 				x = 1000;
925 				units = "us";
926 			}
927 
928 			snprintf(columns, sizeof(columns), "%4ld%s |",
929 				 DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
930 		}
931 
932 		if (crtc->debug.vbl.times[row]) {
933 			x = ilog2(crtc->debug.vbl.times[row]);
934 			memset(columns + 8, '*', x);
935 			columns[8 + x] = '\0';
936 		}
937 
938 		seq_printf(m, "%s%s\n", hdr, columns);
939 	}
940 
941 	seq_printf(m, "%sMin update: %lluns\n",
942 		   hdr, crtc->debug.vbl.min);
943 	seq_printf(m, "%sMax update: %lluns\n",
944 		   hdr, crtc->debug.vbl.max);
945 	seq_printf(m, "%sAverage update: %lluns\n",
946 		   hdr, div64_u64(crtc->debug.vbl.sum,  count));
947 	seq_printf(m, "%sOverruns > %uus: %u\n",
948 		   hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
949 }
950 
951 static int crtc_updates_show(struct seq_file *m, void *data)
952 {
953 	crtc_updates_info(m, m->private, "");
954 	return 0;
955 }
956 
957 static int crtc_updates_open(struct inode *inode, struct file *file)
958 {
959 	return single_open(file, crtc_updates_show, inode->i_private);
960 }
961 
962 static ssize_t crtc_updates_write(struct file *file,
963 				  const char __user *ubuf,
964 				  size_t len, loff_t *offp)
965 {
966 	struct seq_file *m = file->private_data;
967 	struct intel_crtc *crtc = m->private;
968 
969 	/* May race with an update. Meh. */
970 	memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
971 
972 	return len;
973 }
974 
975 static const struct file_operations crtc_updates_fops = {
976 	.owner = THIS_MODULE,
977 	.open = crtc_updates_open,
978 	.read = seq_read,
979 	.llseek = seq_lseek,
980 	.release = single_release,
981 	.write = crtc_updates_write
982 };
983 
984 static void crtc_updates_add(struct drm_crtc *crtc)
985 {
986 	debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
987 			    to_intel_crtc(crtc), &crtc_updates_fops);
988 }
989 
990 #else
991 static void crtc_updates_info(struct seq_file *m,
992 			      struct intel_crtc *crtc,
993 			      const char *hdr)
994 {
995 }
996 
997 static void crtc_updates_add(struct drm_crtc *crtc)
998 {
999 }
1000 #endif
1001 
1002 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
1003 {
1004 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1005 	const struct intel_crtc_state *crtc_state =
1006 		to_intel_crtc_state(crtc->base.state);
1007 	struct intel_encoder *encoder;
1008 
1009 	seq_printf(m, "[CRTC:%d:%s]:\n",
1010 		   crtc->base.base.id, crtc->base.name);
1011 
1012 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
1013 		   yesno(crtc_state->uapi.enable),
1014 		   yesno(crtc_state->uapi.active),
1015 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
1016 
1017 	if (crtc_state->hw.enable) {
1018 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
1019 			   yesno(crtc_state->hw.active),
1020 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
1021 
1022 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
1023 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
1024 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
1025 
1026 		intel_scaler_info(m, crtc);
1027 	}
1028 
1029 	if (crtc_state->bigjoiner)
1030 		seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
1031 			   crtc_state->bigjoiner_linked_crtc->base.base.id,
1032 			   crtc_state->bigjoiner_linked_crtc->base.name,
1033 			   crtc_state->bigjoiner_slave ? "slave" : "master");
1034 
1035 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
1036 				    crtc_state->uapi.encoder_mask)
1037 		intel_encoder_info(m, crtc, encoder);
1038 
1039 	intel_plane_info(m, crtc);
1040 
1041 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
1042 		   yesno(!crtc->cpu_fifo_underrun_disabled),
1043 		   yesno(!crtc->pch_fifo_underrun_disabled));
1044 
1045 	crtc_updates_info(m, crtc, "\t");
1046 }
1047 
1048 static int i915_display_info(struct seq_file *m, void *unused)
1049 {
1050 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1051 	struct drm_device *dev = &dev_priv->drm;
1052 	struct intel_crtc *crtc;
1053 	struct drm_connector *connector;
1054 	struct drm_connector_list_iter conn_iter;
1055 	intel_wakeref_t wakeref;
1056 
1057 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1058 
1059 	drm_modeset_lock_all(dev);
1060 
1061 	seq_puts(m, "CRTC info\n");
1062 	seq_puts(m, "---------\n");
1063 	for_each_intel_crtc(dev, crtc)
1064 		intel_crtc_info(m, crtc);
1065 
1066 	seq_puts(m, "\n");
1067 	seq_puts(m, "Connector info\n");
1068 	seq_puts(m, "--------------\n");
1069 	drm_connector_list_iter_begin(dev, &conn_iter);
1070 	drm_for_each_connector_iter(connector, &conn_iter)
1071 		intel_connector_info(m, connector);
1072 	drm_connector_list_iter_end(&conn_iter);
1073 
1074 	drm_modeset_unlock_all(dev);
1075 
1076 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1077 
1078 	return 0;
1079 }
1080 
1081 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
1082 {
1083 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1084 	struct drm_device *dev = &dev_priv->drm;
1085 	int i;
1086 
1087 	drm_modeset_lock_all(dev);
1088 
1089 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
1090 		   dev_priv->dpll.ref_clks.nssc,
1091 		   dev_priv->dpll.ref_clks.ssc);
1092 
1093 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
1094 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
1095 
1096 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
1097 			   pll->info->id);
1098 		seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
1099 			   pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
1100 		seq_puts(m, " tracked hardware state:\n");
1101 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
1102 		seq_printf(m, " dpll_md: 0x%08x\n",
1103 			   pll->state.hw_state.dpll_md);
1104 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
1105 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
1106 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
1107 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
1108 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
1109 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
1110 			   pll->state.hw_state.mg_refclkin_ctl);
1111 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
1112 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
1113 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
1114 			   pll->state.hw_state.mg_clktop2_hsclkctl);
1115 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
1116 			   pll->state.hw_state.mg_pll_div0);
1117 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
1118 			   pll->state.hw_state.mg_pll_div1);
1119 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
1120 			   pll->state.hw_state.mg_pll_lf);
1121 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
1122 			   pll->state.hw_state.mg_pll_frac_lock);
1123 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
1124 			   pll->state.hw_state.mg_pll_ssc);
1125 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
1126 			   pll->state.hw_state.mg_pll_bias);
1127 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
1128 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
1129 	}
1130 	drm_modeset_unlock_all(dev);
1131 
1132 	return 0;
1133 }
1134 
1135 static int i915_ipc_status_show(struct seq_file *m, void *data)
1136 {
1137 	struct drm_i915_private *dev_priv = m->private;
1138 
1139 	seq_printf(m, "Isochronous Priority Control: %s\n",
1140 			yesno(dev_priv->ipc_enabled));
1141 	return 0;
1142 }
1143 
1144 static int i915_ipc_status_open(struct inode *inode, struct file *file)
1145 {
1146 	struct drm_i915_private *dev_priv = inode->i_private;
1147 
1148 	if (!HAS_IPC(dev_priv))
1149 		return -ENODEV;
1150 
1151 	return single_open(file, i915_ipc_status_show, dev_priv);
1152 }
1153 
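/*
 * Writes take a boolean string ("0"/"1", "y"/"n", ...) as parsed by
 * kstrtobool_from_user(), and enable or disable IPC accordingly.
 */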
1154 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
1155 				     size_t len, loff_t *offp)
1156 {
1157 	struct seq_file *m = file->private_data;
1158 	struct drm_i915_private *dev_priv = m->private;
1159 	intel_wakeref_t wakeref;
1160 	bool enable;
1161 	int ret;
1162 
1163 	ret = kstrtobool_from_user(ubuf, len, &enable);
1164 	if (ret < 0)
1165 		return ret;
1166 
1167 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1168 		if (!dev_priv->ipc_enabled && enable)
1169 			drm_info(&dev_priv->drm,
1170 				 "Enabling IPC: WM will be proper only after next commit\n");
1171 		dev_priv->ipc_enabled = enable;
1172 		intel_enable_ipc(dev_priv);
1173 	}
1174 
1175 	return len;
1176 }
1177 
1178 static const struct file_operations i915_ipc_status_fops = {
1179 	.owner = THIS_MODULE,
1180 	.open = i915_ipc_status_open,
1181 	.read = seq_read,
1182 	.llseek = seq_lseek,
1183 	.release = single_release,
1184 	.write = i915_ipc_status_write
1185 };
1186 
1187 static int i915_ddb_info(struct seq_file *m, void *unused)
1188 {
1189 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1190 	struct drm_device *dev = &dev_priv->drm;
1191 	struct skl_ddb_entry *entry;
1192 	struct intel_crtc *crtc;
1193 
1194 	if (DISPLAY_VER(dev_priv) < 9)
1195 		return -ENODEV;
1196 
1197 	drm_modeset_lock_all(dev);
1198 
1199 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1200 
1201 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1202 		struct intel_crtc_state *crtc_state =
1203 			to_intel_crtc_state(crtc->base.state);
1204 		enum pipe pipe = crtc->pipe;
1205 		enum plane_id plane_id;
1206 
1207 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1208 
1209 		for_each_plane_id_on_crtc(crtc, plane_id) {
1210 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1211 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1212 				   entry->start, entry->end,
1213 				   skl_ddb_entry_size(entry));
1214 		}
1215 
1216 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1217 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1218 			   entry->end, skl_ddb_entry_size(entry));
1219 	}
1220 
1221 	drm_modeset_unlock_all(dev);
1222 
1223 	return 0;
1224 }
1225 
1226 static void drrs_status_per_crtc(struct seq_file *m,
1227 				 struct drm_device *dev,
1228 				 struct intel_crtc *intel_crtc)
1229 {
1230 	struct drm_i915_private *dev_priv = to_i915(dev);
1231 	struct i915_drrs *drrs = &dev_priv->drrs;
1232 	int vrefresh = 0;
1233 	struct drm_connector *connector;
1234 	struct drm_connector_list_iter conn_iter;
1235 
1236 	drm_connector_list_iter_begin(dev, &conn_iter);
1237 	drm_for_each_connector_iter(connector, &conn_iter) {
1238 		bool supported = false;
1239 
1240 		if (connector->state->crtc != &intel_crtc->base)
1241 			continue;
1242 
1243 		seq_printf(m, "%s:\n", connector->name);
1244 
1245 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
1246 		    drrs->type == SEAMLESS_DRRS_SUPPORT)
1247 			supported = true;
1248 
1249 		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
1250 	}
1251 	drm_connector_list_iter_end(&conn_iter);
1252 
1253 	seq_puts(m, "\n");
1254 
1255 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1256 		struct intel_panel *panel;
1257 
1258 		mutex_lock(&drrs->mutex);
1259 		/* DRRS Supported */
1260 		seq_puts(m, "\tDRRS Enabled: Yes\n");
1261 
1262 		/* disable_drrs() will make drrs->dp NULL */
1263 		if (!drrs->dp) {
1264 			seq_puts(m, "Idleness DRRS: Disabled\n");
1265 			mutex_unlock(&drrs->mutex);
1266 			return;
1267 		}
1268 
1269 		panel = &drrs->dp->attached_connector->panel;
1270 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1271 					drrs->busy_frontbuffer_bits);
1272 
1273 		seq_puts(m, "\n\t\t");
1274 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1275 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1276 			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1277 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1278 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1279 			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1280 		} else {
1281 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1282 						drrs->refresh_rate_type);
1283 			mutex_unlock(&drrs->mutex);
1284 			return;
1285 		}
1286 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1287 
1288 		seq_puts(m, "\n\t\t");
1289 		mutex_unlock(&drrs->mutex);
1290 	} else {
1291 		/* DRRS not supported. Print the VBT parameter */
1292 		seq_puts(m, "\tDRRS Enabled: No");
1293 	}
1294 	seq_puts(m, "\n");
1295 }
1296 
1297 static int i915_drrs_status(struct seq_file *m, void *unused)
1298 {
1299 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1300 	struct drm_device *dev = &dev_priv->drm;
1301 	struct intel_crtc *intel_crtc;
1302 	int active_crtc_cnt = 0;
1303 
1304 	drm_modeset_lock_all(dev);
1305 	for_each_intel_crtc(dev, intel_crtc) {
1306 		if (intel_crtc->base.state->active) {
1307 			active_crtc_cnt++;
1308 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1309 
1310 			drrs_status_per_crtc(m, dev, intel_crtc);
1311 		}
1312 	}
1313 	drm_modeset_unlock_all(dev);
1314 
1315 	if (!active_crtc_cnt)
1316 		seq_puts(m, "No active crtc found\n");
1317 
1318 	return 0;
1319 }
1320 
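/*
 * LPSP is considered enabled when the relevant display power well is off,
 * hence the negated intel_lpsp_power_well_enabled() checks below.
 */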
1321 #define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
1322 				seq_puts(m, "LPSP: disabled\n"))
1323 
1324 static bool
1325 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1326 			      enum i915_power_well_id power_well_id)
1327 {
1328 	intel_wakeref_t wakeref;
1329 	bool is_enabled;
1330 
1331 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1332 	is_enabled = intel_display_power_well_is_enabled(i915,
1333 							 power_well_id);
1334 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1335 
1336 	return is_enabled;
1337 }
1338 
1339 static int i915_lpsp_status(struct seq_file *m, void *unused)
1340 {
1341 	struct drm_i915_private *i915 = node_to_i915(m->private);
1342 
1343 	if (DISPLAY_VER(i915) >= 13) {
1344 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915,
1345 							   SKL_DISP_PW_2));
1346 		return 0;
1347 	}
1348 
1349 	switch (DISPLAY_VER(i915)) {
1350 	case 12:
1351 	case 11:
1352 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1353 		break;
1354 	case 10:
1355 	case 9:
1356 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1357 		break;
1358 	default:
1359 		/*
1360 		 * Apart from HASWELL/BROADWELL, no other legacy platforms
1361 		 * support LPSP.
1362 		 */
1363 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1364 			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1365 		else
1366 			seq_puts(m, "LPSP: not supported\n");
1367 	}
1368 
1369 	return 0;
1370 }
1371 
1372 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1373 {
1374 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1375 	struct drm_device *dev = &dev_priv->drm;
1376 	struct intel_encoder *intel_encoder;
1377 	struct intel_digital_port *dig_port;
1378 	struct drm_connector *connector;
1379 	struct drm_connector_list_iter conn_iter;
1380 
1381 	drm_connector_list_iter_begin(dev, &conn_iter);
1382 	drm_for_each_connector_iter(connector, &conn_iter) {
1383 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1384 			continue;
1385 
1386 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1387 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1388 			continue;
1389 
1390 		dig_port = enc_to_dig_port(intel_encoder);
1391 		if (!dig_port->dp.can_mst)
1392 			continue;
1393 
1394 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1395 			   dig_port->base.base.base.id,
1396 			   dig_port->base.base.name);
1397 		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1398 	}
1399 	drm_connector_list_iter_end(&conn_iter);
1400 
1401 	return 0;
1402 }
1403 
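/*
 * DP compliance: userspace writes "1" here to flag that an automated test
 * request should be acted upon; any other value clears the flag.
 */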
1404 static ssize_t i915_displayport_test_active_write(struct file *file,
1405 						  const char __user *ubuf,
1406 						  size_t len, loff_t *offp)
1407 {
1408 	char *input_buffer;
1409 	int status = 0;
1410 	struct drm_device *dev;
1411 	struct drm_connector *connector;
1412 	struct drm_connector_list_iter conn_iter;
1413 	struct intel_dp *intel_dp;
1414 	int val = 0;
1415 
1416 	dev = ((struct seq_file *)file->private_data)->private;
1417 
1418 	if (len == 0)
1419 		return 0;
1420 
1421 	input_buffer = memdup_user_nul(ubuf, len);
1422 	if (IS_ERR(input_buffer))
1423 		return PTR_ERR(input_buffer);
1424 
1425 	drm_dbg(&to_i915(dev)->drm,
1426 		"Copied %d bytes from user\n", (unsigned int)len);
1427 
1428 	drm_connector_list_iter_begin(dev, &conn_iter);
1429 	drm_for_each_connector_iter(connector, &conn_iter) {
1430 		struct intel_encoder *encoder;
1431 
1432 		if (connector->connector_type !=
1433 		    DRM_MODE_CONNECTOR_DisplayPort)
1434 			continue;
1435 
1436 		encoder = to_intel_encoder(connector->encoder);
1437 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1438 			continue;
1439 
1440 		if (encoder && connector->status == connector_status_connected) {
1441 			intel_dp = enc_to_intel_dp(encoder);
1442 			status = kstrtoint(input_buffer, 10, &val);
1443 			if (status < 0)
1444 				break;
1445 			drm_dbg(&to_i915(dev)->drm,
1446 				"Got %d for test active\n", val);
1447 			/* To prevent erroneous activation of the compliance
1448 			 * testing code, only accept an actual value of 1 here
1449 			 */
1450 			if (val == 1)
1451 				intel_dp->compliance.test_active = true;
1452 			else
1453 				intel_dp->compliance.test_active = false;
1454 		}
1455 	}
1456 	drm_connector_list_iter_end(&conn_iter);
1457 	kfree(input_buffer);
1458 	if (status < 0)
1459 		return status;
1460 
1461 	*offp += len;
1462 	return len;
1463 }
1464 
1465 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1466 {
1467 	struct drm_i915_private *dev_priv = m->private;
1468 	struct drm_device *dev = &dev_priv->drm;
1469 	struct drm_connector *connector;
1470 	struct drm_connector_list_iter conn_iter;
1471 	struct intel_dp *intel_dp;
1472 
1473 	drm_connector_list_iter_begin(dev, &conn_iter);
1474 	drm_for_each_connector_iter(connector, &conn_iter) {
1475 		struct intel_encoder *encoder;
1476 
1477 		if (connector->connector_type !=
1478 		    DRM_MODE_CONNECTOR_DisplayPort)
1479 			continue;
1480 
1481 		encoder = to_intel_encoder(connector->encoder);
1482 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1483 			continue;
1484 
1485 		if (encoder && connector->status == connector_status_connected) {
1486 			intel_dp = enc_to_intel_dp(encoder);
1487 			if (intel_dp->compliance.test_active)
1488 				seq_puts(m, "1");
1489 			else
1490 				seq_puts(m, "0");
1491 		} else
1492 			seq_puts(m, "0");
1493 	}
1494 	drm_connector_list_iter_end(&conn_iter);
1495 
1496 	return 0;
1497 }
1498 
1499 static int i915_displayport_test_active_open(struct inode *inode,
1500 					     struct file *file)
1501 {
1502 	return single_open(file, i915_displayport_test_active_show,
1503 			   inode->i_private);
1504 }
1505 
1506 static const struct file_operations i915_displayport_test_active_fops = {
1507 	.owner = THIS_MODULE,
1508 	.open = i915_displayport_test_active_open,
1509 	.read = seq_read,
1510 	.llseek = seq_lseek,
1511 	.release = single_release,
1512 	.write = i915_displayport_test_active_write
1513 };
1514 
1515 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1516 {
1517 	struct drm_i915_private *dev_priv = m->private;
1518 	struct drm_device *dev = &dev_priv->drm;
1519 	struct drm_connector *connector;
1520 	struct drm_connector_list_iter conn_iter;
1521 	struct intel_dp *intel_dp;
1522 
1523 	drm_connector_list_iter_begin(dev, &conn_iter);
1524 	drm_for_each_connector_iter(connector, &conn_iter) {
1525 		struct intel_encoder *encoder;
1526 
1527 		if (connector->connector_type !=
1528 		    DRM_MODE_CONNECTOR_DisplayPort)
1529 			continue;
1530 
1531 		encoder = to_intel_encoder(connector->encoder);
1532 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1533 			continue;
1534 
1535 		if (encoder && connector->status == connector_status_connected) {
1536 			intel_dp = enc_to_intel_dp(encoder);
1537 			if (intel_dp->compliance.test_type ==
1538 			    DP_TEST_LINK_EDID_READ)
1539 				seq_printf(m, "%lx",
1540 					   intel_dp->compliance.test_data.edid);
1541 			else if (intel_dp->compliance.test_type ==
1542 				 DP_TEST_LINK_VIDEO_PATTERN) {
1543 				seq_printf(m, "hdisplay: %d\n",
1544 					   intel_dp->compliance.test_data.hdisplay);
1545 				seq_printf(m, "vdisplay: %d\n",
1546 					   intel_dp->compliance.test_data.vdisplay);
1547 				seq_printf(m, "bpc: %u\n",
1548 					   intel_dp->compliance.test_data.bpc);
1549 			} else if (intel_dp->compliance.test_type ==
1550 				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1551 				seq_printf(m, "pattern: %d\n",
1552 					   intel_dp->compliance.test_data.phytest.phy_pattern);
1553 				seq_printf(m, "Number of lanes: %d\n",
1554 					   intel_dp->compliance.test_data.phytest.num_lanes);
1555 				seq_printf(m, "Link Rate: %d\n",
1556 					   intel_dp->compliance.test_data.phytest.link_rate);
1557 				seq_printf(m, "level: %02x\n",
1558 					   intel_dp->train_set[0]);
1559 			}
1560 		} else
1561 			seq_puts(m, "0");
1562 	}
1563 	drm_connector_list_iter_end(&conn_iter);
1564 
1565 	return 0;
1566 }
1567 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1568 
1569 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1570 {
1571 	struct drm_i915_private *dev_priv = m->private;
1572 	struct drm_device *dev = &dev_priv->drm;
1573 	struct drm_connector *connector;
1574 	struct drm_connector_list_iter conn_iter;
1575 	struct intel_dp *intel_dp;
1576 
1577 	drm_connector_list_iter_begin(dev, &conn_iter);
1578 	drm_for_each_connector_iter(connector, &conn_iter) {
1579 		struct intel_encoder *encoder;
1580 
1581 		if (connector->connector_type !=
1582 		    DRM_MODE_CONNECTOR_DisplayPort)
1583 			continue;
1584 
1585 		encoder = to_intel_encoder(connector->encoder);
1586 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1587 			continue;
1588 
1589 		if (encoder && connector->status == connector_status_connected) {
1590 			intel_dp = enc_to_intel_dp(encoder);
1591 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1592 		} else
1593 			seq_puts(m, "0");
1594 	}
1595 	drm_connector_list_iter_end(&conn_iter);
1596 
1597 	return 0;
1598 }
1599 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1600 
1601 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1602 {
1603 	struct drm_i915_private *dev_priv = m->private;
1604 	struct drm_device *dev = &dev_priv->drm;
1605 	int level;
1606 	int num_levels;
1607 
1608 	if (IS_CHERRYVIEW(dev_priv))
1609 		num_levels = 3;
1610 	else if (IS_VALLEYVIEW(dev_priv))
1611 		num_levels = 1;
1612 	else if (IS_G4X(dev_priv))
1613 		num_levels = 3;
1614 	else
1615 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1616 
1617 	drm_modeset_lock_all(dev);
1618 
1619 	for (level = 0; level < num_levels; level++) {
1620 		unsigned int latency = wm[level];
1621 
1622 		/*
1623 		 * - WM1+ latency values are in 0.5us units
1624 		 * - latencies are in 1us units on gen9+/vlv/chv/g4x
1625 		 */
1626 		if (DISPLAY_VER(dev_priv) >= 9 ||
1627 		    IS_VALLEYVIEW(dev_priv) ||
1628 		    IS_CHERRYVIEW(dev_priv) ||
1629 		    IS_G4X(dev_priv))
1630 			latency *= 10;
1631 		else if (level > 0)
1632 			latency *= 5;
1633 
1634 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1635 			   level, wm[level], latency / 10, latency % 10);
1636 	}
1637 
1638 	drm_modeset_unlock_all(dev);
1639 }
1640 
1641 static int pri_wm_latency_show(struct seq_file *m, void *data)
1642 {
1643 	struct drm_i915_private *dev_priv = m->private;
1644 	const u16 *latencies;
1645 
1646 	if (DISPLAY_VER(dev_priv) >= 9)
1647 		latencies = dev_priv->wm.skl_latency;
1648 	else
1649 		latencies = dev_priv->wm.pri_latency;
1650 
1651 	wm_latency_show(m, latencies);
1652 
1653 	return 0;
1654 }
1655 
1656 static int spr_wm_latency_show(struct seq_file *m, void *data)
1657 {
1658 	struct drm_i915_private *dev_priv = m->private;
1659 	const u16 *latencies;
1660 
1661 	if (DISPLAY_VER(dev_priv) >= 9)
1662 		latencies = dev_priv->wm.skl_latency;
1663 	else
1664 		latencies = dev_priv->wm.spr_latency;
1665 
1666 	wm_latency_show(m, latencies);
1667 
1668 	return 0;
1669 }
1670 
1671 static int cur_wm_latency_show(struct seq_file *m, void *data)
1672 {
1673 	struct drm_i915_private *dev_priv = m->private;
1674 	const u16 *latencies;
1675 
1676 	if (DISPLAY_VER(dev_priv) >= 9)
1677 		latencies = dev_priv->wm.skl_latency;
1678 	else
1679 		latencies = dev_priv->wm.cur_latency;
1680 
1681 	wm_latency_show(m, latencies);
1682 
1683 	return 0;
1684 }
1685 
1686 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1687 {
1688 	struct drm_i915_private *dev_priv = inode->i_private;
1689 
1690 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
1691 		return -ENODEV;
1692 
1693 	return single_open(file, pri_wm_latency_show, dev_priv);
1694 }
1695 
1696 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1697 {
1698 	struct drm_i915_private *dev_priv = inode->i_private;
1699 
1700 	if (HAS_GMCH(dev_priv))
1701 		return -ENODEV;
1702 
1703 	return single_open(file, spr_wm_latency_show, dev_priv);
1704 }
1705 
1706 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1707 {
1708 	struct drm_i915_private *dev_priv = inode->i_private;
1709 
1710 	if (HAS_GMCH(dev_priv))
1711 		return -ENODEV;
1712 
1713 	return single_open(file, cur_wm_latency_show, dev_priv);
1714 }
1715 
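/*
 * Writes expect exactly one value per watermark level, space separated and
 * in the same raw units that wm_latency_show() prints, e.g.
 * "2 4 8 16 32" on a platform with five levels (hypothetical values).
 */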
1716 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1717 				size_t len, loff_t *offp, u16 wm[8])
1718 {
1719 	struct seq_file *m = file->private_data;
1720 	struct drm_i915_private *dev_priv = m->private;
1721 	struct drm_device *dev = &dev_priv->drm;
1722 	u16 new[8] = { 0 };
1723 	int num_levels;
1724 	int level;
1725 	int ret;
1726 	char tmp[32];
1727 
1728 	if (IS_CHERRYVIEW(dev_priv))
1729 		num_levels = 3;
1730 	else if (IS_VALLEYVIEW(dev_priv))
1731 		num_levels = 1;
1732 	else if (IS_G4X(dev_priv))
1733 		num_levels = 3;
1734 	else
1735 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1736 
1737 	if (len >= sizeof(tmp))
1738 		return -EINVAL;
1739 
1740 	if (copy_from_user(tmp, ubuf, len))
1741 		return -EFAULT;
1742 
1743 	tmp[len] = '\0';
1744 
1745 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1746 		     &new[0], &new[1], &new[2], &new[3],
1747 		     &new[4], &new[5], &new[6], &new[7]);
1748 	if (ret != num_levels)
1749 		return -EINVAL;
1750 
1751 	drm_modeset_lock_all(dev);
1752 
1753 	for (level = 0; level < num_levels; level++)
1754 		wm[level] = new[level];
1755 
1756 	drm_modeset_unlock_all(dev);
1757 
1758 	return len;
1759 }
1760 
1761 
1762 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1763 				    size_t len, loff_t *offp)
1764 {
1765 	struct seq_file *m = file->private_data;
1766 	struct drm_i915_private *dev_priv = m->private;
1767 	u16 *latencies;
1768 
1769 	if (DISPLAY_VER(dev_priv) >= 9)
1770 		latencies = dev_priv->wm.skl_latency;
1771 	else
1772 		latencies = dev_priv->wm.pri_latency;
1773 
1774 	return wm_latency_write(file, ubuf, len, offp, latencies);
1775 }
1776 
1777 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1778 				    size_t len, loff_t *offp)
1779 {
1780 	struct seq_file *m = file->private_data;
1781 	struct drm_i915_private *dev_priv = m->private;
1782 	u16 *latencies;
1783 
1784 	if (DISPLAY_VER(dev_priv) >= 9)
1785 		latencies = dev_priv->wm.skl_latency;
1786 	else
1787 		latencies = dev_priv->wm.spr_latency;
1788 
1789 	return wm_latency_write(file, ubuf, len, offp, latencies);
1790 }
1791 
1792 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1793 				    size_t len, loff_t *offp)
1794 {
1795 	struct seq_file *m = file->private_data;
1796 	struct drm_i915_private *dev_priv = m->private;
1797 	u16 *latencies;
1798 
1799 	if (DISPLAY_VER(dev_priv) >= 9)
1800 		latencies = dev_priv->wm.skl_latency;
1801 	else
1802 		latencies = dev_priv->wm.cur_latency;
1803 
1804 	return wm_latency_write(file, ubuf, len, offp, latencies);
1805 }
1806 
1807 static const struct file_operations i915_pri_wm_latency_fops = {
1808 	.owner = THIS_MODULE,
1809 	.open = pri_wm_latency_open,
1810 	.read = seq_read,
1811 	.llseek = seq_lseek,
1812 	.release = single_release,
1813 	.write = pri_wm_latency_write
1814 };
1815 
1816 static const struct file_operations i915_spr_wm_latency_fops = {
1817 	.owner = THIS_MODULE,
1818 	.open = spr_wm_latency_open,
1819 	.read = seq_read,
1820 	.llseek = seq_lseek,
1821 	.release = single_release,
1822 	.write = spr_wm_latency_write
1823 };
1824 
1825 static const struct file_operations i915_cur_wm_latency_fops = {
1826 	.owner = THIS_MODULE,
1827 	.open = cur_wm_latency_open,
1828 	.read = seq_read,
1829 	.llseek = seq_lseek,
1830 	.release = single_release,
1831 	.write = cur_wm_latency_write
1832 };
1833 
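/*
 * i915_hpd_storm_ctl: reading reports the current HPD storm threshold
 * and whether a storm is still being handled (the re-enable work is
 * pending); writing a decimal value sets a new threshold, writing 0
 * disables storm detection, and writing "reset" restores
 * HPD_STORM_DEFAULT_THRESHOLD. Illustrative use (debugfs path assumed):
 *
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */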
1834 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1835 {
1836 	struct drm_i915_private *dev_priv = m->private;
1837 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1838 
	/*
	 * Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet.
	 */
1842 	intel_synchronize_irq(dev_priv);
1843 	flush_work(&dev_priv->hotplug.dig_port_work);
1844 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1845 
1846 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1847 	seq_printf(m, "Detected: %s\n",
1848 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1849 
1850 	return 0;
1851 }
1852 
1853 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1854 					const char __user *ubuf, size_t len,
1855 					loff_t *offp)
1856 {
1857 	struct seq_file *m = file->private_data;
1858 	struct drm_i915_private *dev_priv = m->private;
1859 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1860 	unsigned int new_threshold;
1861 	int i;
1862 	char *newline;
1863 	char tmp[16];
1864 
1865 	if (len >= sizeof(tmp))
1866 		return -EINVAL;
1867 
1868 	if (copy_from_user(tmp, ubuf, len))
1869 		return -EFAULT;
1870 
1871 	tmp[len] = '\0';
1872 
1873 	/* Strip newline, if any */
1874 	newline = strchr(tmp, '\n');
1875 	if (newline)
1876 		*newline = '\0';
1877 
1878 	if (strcmp(tmp, "reset") == 0)
1879 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1880 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1881 		return -EINVAL;
1882 
1883 	if (new_threshold > 0)
1884 		drm_dbg_kms(&dev_priv->drm,
1885 			    "Setting HPD storm detection threshold to %d\n",
1886 			    new_threshold);
1887 	else
1888 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1889 
1890 	spin_lock_irq(&dev_priv->irq_lock);
1891 	hotplug->hpd_storm_threshold = new_threshold;
1892 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1893 	for_each_hpd_pin(i)
1894 		hotplug->stats[i].count = 0;
1895 	spin_unlock_irq(&dev_priv->irq_lock);
1896 
1897 	/* Re-enable hpd immediately if we were in an irq storm */
1898 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1899 
1900 	return len;
1901 }
1902 
1903 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1904 {
1905 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1906 }
1907 
1908 static const struct file_operations i915_hpd_storm_ctl_fops = {
1909 	.owner = THIS_MODULE,
1910 	.open = i915_hpd_storm_ctl_open,
1911 	.read = seq_read,
1912 	.llseek = seq_lseek,
1913 	.release = single_release,
1914 	.write = i915_hpd_storm_ctl_write
1915 };
1916 
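/*
 * i915_hpd_short_storm_ctl: controls whether short HPD pulses are
 * counted towards HPD storm detection. Writing a boolean enables or
 * disables the accounting; writing "reset" restores the default for
 * this system, which is enabled only when the hardware lacks DP MST.
 */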
1917 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1918 {
1919 	struct drm_i915_private *dev_priv = m->private;
1920 
1921 	seq_printf(m, "Enabled: %s\n",
1922 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1923 
1924 	return 0;
1925 }
1926 
1927 static int
1928 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1929 {
1930 	return single_open(file, i915_hpd_short_storm_ctl_show,
1931 			   inode->i_private);
1932 }
1933 
1934 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1935 					      const char __user *ubuf,
1936 					      size_t len, loff_t *offp)
1937 {
1938 	struct seq_file *m = file->private_data;
1939 	struct drm_i915_private *dev_priv = m->private;
1940 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1941 	char *newline;
1942 	char tmp[16];
1943 	int i;
1944 	bool new_state;
1945 
1946 	if (len >= sizeof(tmp))
1947 		return -EINVAL;
1948 
1949 	if (copy_from_user(tmp, ubuf, len))
1950 		return -EFAULT;
1951 
1952 	tmp[len] = '\0';
1953 
1954 	/* Strip newline, if any */
1955 	newline = strchr(tmp, '\n');
1956 	if (newline)
1957 		*newline = '\0';
1958 
1959 	/* Reset to the "default" state for this system */
1960 	if (strcmp(tmp, "reset") == 0)
1961 		new_state = !HAS_DP_MST(dev_priv);
1962 	else if (kstrtobool(tmp, &new_state) != 0)
1963 		return -EINVAL;
1964 
1965 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1966 		    new_state ? "En" : "Dis");
1967 
1968 	spin_lock_irq(&dev_priv->irq_lock);
1969 	hotplug->hpd_short_storm_enabled = new_state;
1970 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1971 	for_each_hpd_pin(i)
1972 		hotplug->stats[i].count = 0;
1973 	spin_unlock_irq(&dev_priv->irq_lock);
1974 
1975 	/* Re-enable hpd immediately if we were in an irq storm */
1976 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1977 
1978 	return len;
1979 }
1980 
1981 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1982 	.owner = THIS_MODULE,
1983 	.open = i915_hpd_short_storm_ctl_open,
1984 	.read = seq_read,
1985 	.llseek = seq_lseek,
1986 	.release = single_release,
1987 	.write = i915_hpd_short_storm_ctl_write,
1988 };
1989 
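/*
 * i915_drrs_ctl: writing a non-zero value manually enables eDP DRRS on
 * every active CRTC whose current state supports DRRS, and writing zero
 * disables it again; the control returns -ENODEV on display versions
 * below 7. Illustrative use (debugfs path assumed):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */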
1990 static int i915_drrs_ctl_set(void *data, u64 val)
1991 {
1992 	struct drm_i915_private *dev_priv = data;
1993 	struct drm_device *dev = &dev_priv->drm;
1994 	struct intel_crtc *crtc;
1995 
1996 	if (DISPLAY_VER(dev_priv) < 7)
1997 		return -ENODEV;
1998 
1999 	for_each_intel_crtc(dev, crtc) {
2000 		struct drm_connector_list_iter conn_iter;
2001 		struct intel_crtc_state *crtc_state;
2002 		struct drm_connector *connector;
2003 		struct drm_crtc_commit *commit;
2004 		int ret;
2005 
2006 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
2007 		if (ret)
2008 			return ret;
2009 
2010 		crtc_state = to_intel_crtc_state(crtc->base.state);
2011 
2012 		if (!crtc_state->hw.active ||
2013 		    !crtc_state->has_drrs)
2014 			goto out;
2015 
2016 		commit = crtc_state->uapi.commit;
2017 		if (commit) {
2018 			ret = wait_for_completion_interruptible(&commit->hw_done);
2019 			if (ret)
2020 				goto out;
2021 		}
2022 
2023 		drm_connector_list_iter_begin(dev, &conn_iter);
2024 		drm_for_each_connector_iter(connector, &conn_iter) {
2025 			struct intel_encoder *encoder;
2026 			struct intel_dp *intel_dp;
2027 
2028 			if (!(crtc_state->uapi.connector_mask &
2029 			      drm_connector_mask(connector)))
2030 				continue;
2031 
2032 			encoder = intel_attached_encoder(to_intel_connector(connector));
2033 			if (encoder->type != INTEL_OUTPUT_EDP)
2034 				continue;
2035 
2036 			drm_dbg(&dev_priv->drm,
2037 				"Manually %sabling DRRS. %llu\n",
2038 				val ? "en" : "dis", val);
2039 
2040 			intel_dp = enc_to_intel_dp(encoder);
2041 			if (val)
2042 				intel_edp_drrs_enable(intel_dp,
2043 						      crtc_state);
2044 			else
2045 				intel_edp_drrs_disable(intel_dp,
2046 						       crtc_state);
2047 		}
2048 		drm_connector_list_iter_end(&conn_iter);
2049 
2050 out:
2051 		drm_modeset_unlock(&crtc->base.mutex);
2052 		if (ret)
2053 			return ret;
2054 	}
2055 
2056 	return 0;
2057 }
2058 
2059 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
2060 
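/*
 * i915_fifo_underrun_reset: writing a truthy value waits for pending
 * commits, re-arms FIFO underrun reporting on every active pipe and
 * resets the FBC underrun tracking, so that later underruns are
 * reported again. Illustrative use (debugfs path assumed):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */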
2061 static ssize_t
2062 i915_fifo_underrun_reset_write(struct file *filp,
2063 			       const char __user *ubuf,
2064 			       size_t cnt, loff_t *ppos)
2065 {
2066 	struct drm_i915_private *dev_priv = filp->private_data;
2067 	struct intel_crtc *intel_crtc;
2068 	struct drm_device *dev = &dev_priv->drm;
2069 	int ret;
2070 	bool reset;
2071 
2072 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
2073 	if (ret)
2074 		return ret;
2075 
2076 	if (!reset)
2077 		return cnt;
2078 
2079 	for_each_intel_crtc(dev, intel_crtc) {
2080 		struct drm_crtc_commit *commit;
2081 		struct intel_crtc_state *crtc_state;
2082 
2083 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
2084 		if (ret)
2085 			return ret;
2086 
2087 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
2088 		commit = crtc_state->uapi.commit;
2089 		if (commit) {
2090 			ret = wait_for_completion_interruptible(&commit->hw_done);
2091 			if (!ret)
2092 				ret = wait_for_completion_interruptible(&commit->flip_done);
2093 		}
2094 
2095 		if (!ret && crtc_state->hw.active) {
2096 			drm_dbg_kms(&dev_priv->drm,
2097 				    "Re-arming FIFO underruns on pipe %c\n",
2098 				    pipe_name(intel_crtc->pipe));
2099 
2100 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
2101 		}
2102 
2103 		drm_modeset_unlock(&intel_crtc->base.mutex);
2104 
2105 		if (ret)
2106 			return ret;
2107 	}
2108 
2109 	ret = intel_fbc_reset_underrun(dev_priv);
2110 	if (ret)
2111 		return ret;
2112 
2113 	return cnt;
2114 }
2115 
2116 static const struct file_operations i915_fifo_underrun_reset_ops = {
2117 	.owner = THIS_MODULE,
2118 	.open = simple_open,
2119 	.write = i915_fifo_underrun_reset_write,
2120 	.llseek = default_llseek,
2121 };
2122 
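/*
 * Read-only informational nodes live in intel_display_debugfs_list and
 * are registered in one go through drm_debugfs_create_files(); the
 * writable control files below are created individually with
 * debugfs_create_file() in intel_display_debugfs_register().
 */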
2123 static const struct drm_info_list intel_display_debugfs_list[] = {
2124 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
2125 	{"i915_fbc_status", i915_fbc_status, 0},
2126 	{"i915_ips_status", i915_ips_status, 0},
2127 	{"i915_sr_status", i915_sr_status, 0},
2128 	{"i915_opregion", i915_opregion, 0},
2129 	{"i915_vbt", i915_vbt, 0},
2130 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
2131 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
2132 	{"i915_power_domain_info", i915_power_domain_info, 0},
2133 	{"i915_dmc_info", i915_dmc_info, 0},
2134 	{"i915_display_info", i915_display_info, 0},
2135 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
2136 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
2137 	{"i915_ddb_info", i915_ddb_info, 0},
2138 	{"i915_drrs_status", i915_drrs_status, 0},
2139 	{"i915_lpsp_status", i915_lpsp_status, 0},
2140 };
2141 
2142 static const struct {
2143 	const char *name;
2144 	const struct file_operations *fops;
2145 } intel_display_debugfs_files[] = {
2146 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
2147 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
2148 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
2149 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
2150 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
2151 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
2152 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
2153 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
2154 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
2155 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
2156 	{"i915_ipc_status", &i915_ipc_status_fops},
2157 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
2158 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
2159 };
2160 
2161 void intel_display_debugfs_register(struct drm_i915_private *i915)
2162 {
2163 	struct drm_minor *minor = i915->drm.primary;
2164 	int i;
2165 
2166 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
2167 		debugfs_create_file(intel_display_debugfs_files[i].name,
				    0644,
2169 				    minor->debugfs_root,
2170 				    to_i915(minor->dev),
2171 				    intel_display_debugfs_files[i].fops);
2172 	}
2173 
2174 	drm_debugfs_create_files(intel_display_debugfs_list,
2175 				 ARRAY_SIZE(intel_display_debugfs_list),
2176 				 minor->debugfs_root, minor);
2177 }
2178 
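/*
 * Per-connector debugfs files follow. i915_panel_timings dumps the eDP
 * panel power sequencing and backlight delays; it can be read with
 * something like (the connector directory name is illustrative):
 *
 *   cat /sys/kernel/debug/dri/0/eDP-1/i915_panel_timings
 */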
2179 static int i915_panel_show(struct seq_file *m, void *data)
2180 {
2181 	struct drm_connector *connector = m->private;
2182 	struct intel_dp *intel_dp =
2183 		intel_attached_dp(to_intel_connector(connector));
2184 
2185 	if (connector->status != connector_status_connected)
2186 		return -ENODEV;
2187 
2188 	seq_printf(m, "Panel power up delay: %d\n",
2189 		   intel_dp->pps.panel_power_up_delay);
2190 	seq_printf(m, "Panel power down delay: %d\n",
2191 		   intel_dp->pps.panel_power_down_delay);
2192 	seq_printf(m, "Backlight on delay: %d\n",
2193 		   intel_dp->pps.backlight_on_delay);
2194 	seq_printf(m, "Backlight off delay: %d\n",
2195 		   intel_dp->pps.backlight_off_delay);
2196 
2197 	return 0;
2198 }
2199 DEFINE_SHOW_ATTRIBUTE(i915_panel);
2200 
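/*
 * i915_hdcp_sink_capability: prints the HDCP versions supported by the
 * attached sink via intel_hdcp_info(); the connector has to be
 * connected and have an encoder, otherwise the read returns -ENODEV.
 */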
2201 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2202 {
2203 	struct drm_connector *connector = m->private;
2204 	struct drm_i915_private *i915 = to_i915(connector->dev);
2205 	struct intel_connector *intel_connector = to_intel_connector(connector);
2206 	int ret;
2207 
2208 	ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
2209 	if (ret)
2210 		return ret;
2211 
2212 	if (!connector->encoder || connector->status != connector_status_connected) {
2213 		ret = -ENODEV;
2214 		goto out;
2215 	}
2216 
2217 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2218 		   connector->base.id);
2219 	intel_hdcp_info(m, intel_connector);
2220 
2221 out:
2222 	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
2223 
2224 	return ret;
2225 }
2226 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2227 
2228 static int i915_psr_status_show(struct seq_file *m, void *data)
2229 {
2230 	struct drm_connector *connector = m->private;
2231 	struct intel_dp *intel_dp =
2232 		intel_attached_dp(to_intel_connector(connector));
2233 
2234 	return intel_psr_status(m, intel_dp);
2235 }
2236 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
2237 
2238 #define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
2239 				seq_puts(m, "LPSP: incapable\n"))
2240 
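/*
 * i915_lpsp_capability: reports whether the connector is LPSP capable
 * on this platform, based on the port and connector type checks below.
 */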
2241 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2242 {
2243 	struct drm_connector *connector = m->private;
2244 	struct drm_i915_private *i915 = to_i915(connector->dev);
2245 	struct intel_encoder *encoder;
2246 
2247 	encoder = intel_attached_encoder(to_intel_connector(connector));
2248 	if (!encoder)
2249 		return -ENODEV;
2250 
2251 	if (connector->status != connector_status_connected)
2252 		return -ENODEV;
2253 
2254 	switch (DISPLAY_VER(i915)) {
2255 	case 12:
		/*
		 * TGL can drive LPSP on ports up to DDI_C, but no TGL SKU
		 * has DDI_C physically connected, and the driver does not
		 * even initialize the DDI_C port for gen12.
		 */
2261 		LPSP_CAPABLE(encoder->port <= PORT_B);
2262 		break;
2263 	case 11:
2264 		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2265 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2266 		break;
2267 	case 10:
2268 	case 9:
2269 		LPSP_CAPABLE(encoder->port == PORT_A &&
2270 			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2271 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
2272 			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2273 		break;
2274 	default:
2275 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2276 			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2277 	}
2278 
2279 	return 0;
2280 }
2281 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2282 
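/*
 * i915_dsc_fec_support: reading reports whether DSC is enabled on the
 * connector's current CRTC, whether the sink advertises DSC (and, for
 * non-eDP, FEC) support, and whether DSC is being forced. Writing a
 * boolean sets intel_dp->force_dsc_en for subsequent mode computation.
 * Illustrative use (connector directory name assumed):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */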
2283 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2284 {
2285 	struct drm_connector *connector = m->private;
2286 	struct drm_device *dev = connector->dev;
2287 	struct drm_crtc *crtc;
2288 	struct intel_dp *intel_dp;
2289 	struct drm_modeset_acquire_ctx ctx;
2290 	struct intel_crtc_state *crtc_state = NULL;
2291 	int ret = 0;
2292 	bool try_again = false;
2293 
2294 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2295 
2296 	do {
2297 		try_again = false;
2298 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2299 				       &ctx);
2300 		if (ret) {
2301 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2302 				try_again = true;
2303 				continue;
2304 			}
2305 			break;
2306 		}
2307 		crtc = connector->state->crtc;
2308 		if (connector->status != connector_status_connected || !crtc) {
2309 			ret = -ENODEV;
2310 			break;
2311 		}
2312 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2313 		if (ret == -EDEADLK) {
2314 			ret = drm_modeset_backoff(&ctx);
2315 			if (!ret) {
2316 				try_again = true;
2317 				continue;
2318 			}
2319 			break;
2320 		} else if (ret) {
2321 			break;
2322 		}
2323 		intel_dp = intel_attached_dp(to_intel_connector(connector));
2324 		crtc_state = to_intel_crtc_state(crtc->state);
2325 		seq_printf(m, "DSC_Enabled: %s\n",
2326 			   yesno(crtc_state->dsc.compression_enable));
2327 		seq_printf(m, "DSC_Sink_Support: %s\n",
2328 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2329 		seq_printf(m, "Force_DSC_Enable: %s\n",
2330 			   yesno(intel_dp->force_dsc_en));
2331 		if (!intel_dp_is_edp(intel_dp))
2332 			seq_printf(m, "FEC_Sink_Support: %s\n",
2333 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2334 	} while (try_again);
2335 
2336 	drm_modeset_drop_locks(&ctx);
2337 	drm_modeset_acquire_fini(&ctx);
2338 
2339 	return ret;
2340 }
2341 
2342 static ssize_t i915_dsc_fec_support_write(struct file *file,
2343 					  const char __user *ubuf,
2344 					  size_t len, loff_t *offp)
2345 {
2346 	bool dsc_enable = false;
2347 	int ret;
2348 	struct drm_connector *connector =
2349 		((struct seq_file *)file->private_data)->private;
2350 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2351 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2352 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2353 
2354 	if (len == 0)
2355 		return 0;
2356 
2357 	drm_dbg(&i915->drm,
2358 		"Copied %zu bytes from user to force DSC\n", len);
2359 
2360 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2361 	if (ret < 0)
2362 		return ret;
2363 
2364 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2365 		(dsc_enable) ? "true" : "false");
2366 	intel_dp->force_dsc_en = dsc_enable;
2367 
2368 	*offp += len;
2369 	return len;
2370 }
2371 
2372 static int i915_dsc_fec_support_open(struct inode *inode,
2373 				     struct file *file)
2374 {
2375 	return single_open(file, i915_dsc_fec_support_show,
2376 			   inode->i_private);
2377 }
2378 
2379 static const struct file_operations i915_dsc_fec_support_fops = {
2380 	.owner = THIS_MODULE,
2381 	.open = i915_dsc_fec_support_open,
2382 	.read = seq_read,
2383 	.llseek = seq_lseek,
2384 	.release = single_release,
2385 	.write = i915_dsc_fec_support_write
2386 };
2387 
2388 /**
2389  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2390  * @connector: pointer to a registered drm_connector
2391  *
2392  * Cleanup will be done by drm_connector_unregister() through a call to
2393  * drm_debugfs_connector_remove().
2394  *
2395  * Returns 0 on success, negative error codes on error.
2396  */
2397 int intel_connector_debugfs_add(struct drm_connector *connector)
2398 {
2399 	struct dentry *root = connector->debugfs_entry;
2400 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2401 
	/* The connector must have been registered beforehand. */
2403 	if (!root)
2404 		return -ENODEV;
2405 
2406 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", 0444, root,
2408 				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", 0444, root,
2410 				    connector, &i915_psr_sink_status_fops);
2411 	}
2412 
2413 	if (HAS_PSR(dev_priv) &&
2414 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2415 		debugfs_create_file("i915_psr_status", 0444, root,
2416 				    connector, &i915_psr_status_fops);
2417 	}
2418 
2419 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2420 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2421 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
2423 				    connector, &i915_hdcp_sink_capability_fops);
2424 	}
2425 
	if ((DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) &&
	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
	      !to_intel_connector(connector)->mst_port) ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", 0444, root,
2428 				    connector, &i915_dsc_fec_support_fops);
2429 
	/* Legacy panels don't support LPSP on any platform */
2431 	if ((DISPLAY_VER(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2432 	     IS_BROADWELL(dev_priv)) &&
2433 	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2434 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2435 	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2436 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2437 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2438 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2439 				    connector, &i915_lpsp_capability_fops);
2440 
2441 	return 0;
2442 }
2443 
2444 /**
2445  * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
2446  * @crtc: pointer to a drm_crtc
2447  *
2448  * Returns 0 on success, negative error codes on error.
2449  *
2450  * Failure to add debugfs entries should generally be ignored.
2451  */
2452 int intel_crtc_debugfs_add(struct drm_crtc *crtc)
2453 {
2454 	if (!crtc->debugfs_entry)
2455 		return -ENODEV;
2456 
2457 	crtc_updates_add(crtc);
2458 	return 0;
2459 }
2460