1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_csr.h"
11 #include "intel_display_debugfs.h"
12 #include "intel_display_power.h"
13 #include "intel_display_types.h"
14 #include "intel_dp.h"
15 #include "intel_fbc.h"
16 #include "intel_hdcp.h"
17 #include "intel_hdmi.h"
18 #include "intel_pm.h"
19 #include "intel_psr.h"
20 #include "intel_sideband.h"
21 #include "intel_sprite.h"
22 
23 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
24 {
25 	return to_i915(node->minor->dev);
26 }
27 
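/* Dump the frontbuffer tracking busy and flip bitmasks (i915->fb_tracking). */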
28 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
29 {
30 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
31 
32 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
33 		   dev_priv->fb_tracking.busy_bits);
34 
35 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
36 		   dev_priv->fb_tracking.flip_bits);
37 
38 	return 0;
39 }
40 
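/*
 * Report whether FBC is currently active (or why it is disabled) and,
 * when active, whether the hardware compression status bits are set.
 */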
41 static int i915_fbc_status(struct seq_file *m, void *unused)
42 {
43 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
44 	struct intel_fbc *fbc = &dev_priv->fbc;
45 	intel_wakeref_t wakeref;
46 
47 	if (!HAS_FBC(dev_priv))
48 		return -ENODEV;
49 
50 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
51 	mutex_lock(&fbc->lock);
52 
53 	if (intel_fbc_is_active(dev_priv))
54 		seq_puts(m, "FBC enabled\n");
55 	else
56 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
57 
58 	if (intel_fbc_is_active(dev_priv)) {
59 		u32 mask;
60 
61 		if (DISPLAY_VER(dev_priv) >= 8)
62 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
63 		else if (DISPLAY_VER(dev_priv) >= 7)
64 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
65 		else if (DISPLAY_VER(dev_priv) >= 5)
66 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
67 		else if (IS_G4X(dev_priv))
68 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
69 		else
70 			mask = intel_de_read(dev_priv, FBC_STATUS) &
71 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
72 
73 		seq_printf(m, "Compressing: %s\n", yesno(mask));
74 	}
75 
76 	mutex_unlock(&fbc->lock);
77 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
78 
79 	return 0;
80 }
81 
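/*
 * FBC false color debugfs knob (display version 7+ only): toggles the
 * FBC_CTL_FALSE_COLOR bit in ILK_DPFC_CONTROL.
 */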
82 static int i915_fbc_false_color_get(void *data, u64 *val)
83 {
84 	struct drm_i915_private *dev_priv = data;
85 
86 	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
87 		return -ENODEV;
88 
89 	*val = dev_priv->fbc.false_color;
90 
91 	return 0;
92 }
93 
94 static int i915_fbc_false_color_set(void *data, u64 val)
95 {
96 	struct drm_i915_private *dev_priv = data;
97 	u32 reg;
98 
99 	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
100 		return -ENODEV;
101 
102 	mutex_lock(&dev_priv->fbc.lock);
103 
104 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
105 	dev_priv->fbc.false_color = val;
106 
107 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
108 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
109 
110 	mutex_unlock(&dev_priv->fbc.lock);
111 	return 0;
112 }
113 
114 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
115 			i915_fbc_false_color_get, i915_fbc_false_color_set,
116 			"%llu\n");
117 
118 static int i915_ips_status(struct seq_file *m, void *unused)
119 {
120 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
121 	intel_wakeref_t wakeref;
122 
123 	if (!HAS_IPS(dev_priv))
124 		return -ENODEV;
125 
126 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
127 
128 	seq_printf(m, "Enabled by kernel parameter: %s\n",
129 		   yesno(dev_priv->params.enable_ips));
130 
131 	if (DISPLAY_VER(dev_priv) >= 8) {
132 		seq_puts(m, "Currently: unknown\n");
133 	} else {
134 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
135 			seq_puts(m, "Currently: enabled\n");
136 		else
137 			seq_puts(m, "Currently: disabled\n");
138 	}
139 
140 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
141 
142 	return 0;
143 }
144 
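/*
 * Report whether legacy self-refresh is enabled by reading the
 * platform-specific watermark/self-refresh register. On display
 * version 9+ there is no global SR status to report.
 */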
145 static int i915_sr_status(struct seq_file *m, void *unused)
146 {
147 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
148 	intel_wakeref_t wakeref;
149 	bool sr_enabled = false;
150 
151 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
152 
153 	if (DISPLAY_VER(dev_priv) >= 9)
154 		/* no global SR status; inspect per-plane WM */;
155 	else if (HAS_PCH_SPLIT(dev_priv))
156 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
157 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
158 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
159 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
160 	else if (IS_I915GM(dev_priv))
161 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
162 	else if (IS_PINEVIEW(dev_priv))
163 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
164 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
165 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
166 
167 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
168 
169 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
170 
171 	return 0;
172 }
173 
174 static int i915_opregion(struct seq_file *m, void *unused)
175 {
176 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
177 
178 	if (opregion->header)
179 		seq_write(m, opregion->header, OPREGION_SIZE);
180 
181 	return 0;
182 }
183 
184 static int i915_vbt(struct seq_file *m, void *unused)
185 {
186 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
187 
188 	if (opregion->vbt)
189 		seq_write(m, opregion->vbt, opregion->vbt_size);
190 
191 	return 0;
192 }
193 
194 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
195 {
196 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
197 	struct drm_device *dev = &dev_priv->drm;
198 	struct intel_framebuffer *fbdev_fb = NULL;
199 	struct drm_framebuffer *drm_fb;
200 
201 #ifdef CONFIG_DRM_FBDEV_EMULATION
202 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
203 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
204 
205 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
206 			   fbdev_fb->base.width,
207 			   fbdev_fb->base.height,
208 			   fbdev_fb->base.format->depth,
209 			   fbdev_fb->base.format->cpp[0] * 8,
210 			   fbdev_fb->base.modifier,
211 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
212 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
213 		seq_putc(m, '\n');
214 	}
215 #endif
216 
217 	mutex_lock(&dev->mode_config.fb_lock);
218 	drm_for_each_fb(drm_fb, dev) {
219 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
220 		if (fb == fbdev_fb)
221 			continue;
222 
223 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
224 			   fb->base.width,
225 			   fb->base.height,
226 			   fb->base.format->depth,
227 			   fb->base.format->cpp[0] * 8,
228 			   fb->base.modifier,
229 			   drm_framebuffer_read_refcount(&fb->base));
230 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
231 		seq_putc(m, '\n');
232 	}
233 	mutex_unlock(&dev->mode_config.fb_lock);
234 
235 	return 0;
236 }
237 
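/* Decode the sink-side PSR state from the DP_PSR_STATUS DPCD register. */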
238 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
239 {
240 	u8 val;
241 	static const char * const sink_status[] = {
242 		"inactive",
243 		"transition to active, capture and display",
244 		"active, display from RFB",
245 		"active, capture and display on sink device timings",
246 		"transition to inactive, capture and display, timing re-sync",
247 		"reserved",
248 		"reserved",
249 		"sink internal error",
250 	};
251 	struct drm_connector *connector = m->private;
252 	struct intel_dp *intel_dp =
253 		intel_attached_dp(to_intel_connector(connector));
254 	int ret;
255 
256 	if (!CAN_PSR(intel_dp)) {
257 		seq_puts(m, "PSR Unsupported\n");
258 		return -ENODEV;
259 	}
260 
261 	if (connector->status != connector_status_connected)
262 		return -ENODEV;
263 
264 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
265 
266 	if (ret == 1) {
267 		const char *str = "unknown";
268 
269 		val &= DP_PSR_SINK_STATE_MASK;
270 		if (val < ARRAY_SIZE(sink_status))
271 			str = sink_status[val];
272 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
	} else {
		/* A short read (ret == 0) is also a failure */
		return ret < 0 ? ret : -EIO;
	}
276 
277 	return 0;
278 }
279 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
280 
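/*
 * Decode the source-side PSR state machine from EDP_PSR_STATUS or
 * EDP_PSR2_STATUS, depending on which PSR version is enabled.
 */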
281 static void
282 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
283 {
284 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
285 	const char *status = "unknown";
286 	u32 val, status_val;
287 
288 	if (intel_dp->psr.psr2_enabled) {
289 		static const char * const live_status[] = {
290 			"IDLE",
291 			"CAPTURE",
292 			"CAPTURE_FS",
293 			"SLEEP",
294 			"BUFON_FW",
295 			"ML_UP",
296 			"SU_STANDBY",
297 			"FAST_SLEEP",
298 			"DEEP_SLEEP",
299 			"BUF_ON",
300 			"TG_ON"
301 		};
302 		val = intel_de_read(dev_priv,
303 				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
304 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
305 			      EDP_PSR2_STATUS_STATE_SHIFT;
306 		if (status_val < ARRAY_SIZE(live_status))
307 			status = live_status[status_val];
308 	} else {
309 		static const char * const live_status[] = {
310 			"IDLE",
311 			"SRDONACK",
312 			"SRDENT",
313 			"BUFOFF",
314 			"BUFON",
315 			"AUXACK",
316 			"SRDOFFACK",
317 			"SRDENT_ON",
318 		};
319 		val = intel_de_read(dev_priv,
320 				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
321 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
322 			      EDP_PSR_STATUS_STATE_SHIFT;
323 		if (status_val < ARRAY_SIZE(live_status))
324 			status = live_status[status_val];
325 	}
326 
327 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
328 }
329 
330 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
331 {
332 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
333 	struct intel_psr *psr = &intel_dp->psr;
334 	intel_wakeref_t wakeref;
335 	const char *status;
336 	bool enabled;
337 	u32 val;
338 
339 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
340 	if (psr->sink_support)
341 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
342 	seq_puts(m, "\n");
343 
344 	if (!psr->sink_support)
345 		return 0;
346 
347 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
348 	mutex_lock(&psr->lock);
349 
350 	if (psr->enabled)
351 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
352 	else
353 		status = "disabled";
354 	seq_printf(m, "PSR mode: %s\n", status);
355 
356 	if (!psr->enabled) {
357 		seq_printf(m, "PSR sink not reliable: %s\n",
358 			   yesno(psr->sink_not_reliable));
359 
360 		goto unlock;
361 	}
362 
363 	if (psr->psr2_enabled) {
364 		val = intel_de_read(dev_priv,
365 				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
366 		enabled = val & EDP_PSR2_ENABLE;
367 	} else {
368 		val = intel_de_read(dev_priv,
369 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
370 		enabled = val & EDP_PSR_ENABLE;
371 	}
372 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
373 		   enableddisabled(enabled), val);
374 	psr_source_status(intel_dp, m);
375 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
376 		   psr->busy_frontbuffer_bits);
377 
378 	/*
379 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
380 	 */
381 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
382 		val = intel_de_read(dev_priv,
383 				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
384 		val &= EDP_PSR_PERF_CNT_MASK;
385 		seq_printf(m, "Performance counter: %u\n", val);
386 	}
387 
388 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
389 		seq_printf(m, "Last attempted entry at: %lld\n",
390 			   psr->last_entry_attempt);
391 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
392 	}
393 
394 	if (psr->psr2_enabled) {
395 		u32 su_frames_val[3];
396 		int frame;
397 
398 		/*
399 		 * Reading all 3 registers before hand to minimize crossing a
400 		 * frame boundary between register reads
401 		 */
402 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
403 			val = intel_de_read(dev_priv,
404 					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
405 			su_frames_val[frame / 3] = val;
406 		}
407 
408 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
409 
410 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
411 			u32 su_blocks;
412 
413 			su_blocks = su_frames_val[frame / 3] &
414 				    PSR2_SU_STATUS_MASK(frame);
415 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
416 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
417 		}
418 
419 		seq_printf(m, "PSR2 selective fetch: %s\n",
420 			   enableddisabled(psr->psr2_sel_fetch_enabled));
421 	}
422 
423 unlock:
424 	mutex_unlock(&psr->lock);
425 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
426 
427 	return 0;
428 }
429 
430 static int i915_edp_psr_status(struct seq_file *m, void *data)
431 {
432 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
433 	struct intel_dp *intel_dp = NULL;
434 	struct intel_encoder *encoder;
435 
436 	if (!HAS_PSR(dev_priv))
437 		return -ENODEV;
438 
	/* Find the first eDP port that supports PSR */
440 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
441 		intel_dp = enc_to_intel_dp(encoder);
442 		break;
443 	}
444 
445 	if (!intel_dp)
446 		return -ENODEV;
447 
448 	return intel_psr_status(m, intel_dp);
449 }
450 
451 static int
452 i915_edp_psr_debug_set(void *data, u64 val)
453 {
454 	struct drm_i915_private *dev_priv = data;
455 	struct intel_encoder *encoder;
456 	intel_wakeref_t wakeref;
457 	int ret = -ENODEV;
458 
459 	if (!HAS_PSR(dev_priv))
460 		return ret;
461 
462 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
463 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
464 
465 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
466 
467 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
468 
		/* TODO: split to each transcoder's PSR debug state */
470 		ret = intel_psr_debug_set(intel_dp, val);
471 
472 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
473 	}
474 
475 	return ret;
476 }
477 
478 static int
479 i915_edp_psr_debug_get(void *data, u64 *val)
480 {
481 	struct drm_i915_private *dev_priv = data;
482 	struct intel_encoder *encoder;
483 
484 	if (!HAS_PSR(dev_priv))
485 		return -ENODEV;
486 
487 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
488 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
489 
		/* TODO: split to each transcoder's PSR debug state */
491 		*val = READ_ONCE(intel_dp->psr.debug);
492 		return 0;
493 	}
494 
495 	return -ENODEV;
496 }
497 
498 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
499 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
500 			"%llu\n");
501 
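/* Dump every power well with its use count and per-domain reference counts. */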
502 static int i915_power_domain_info(struct seq_file *m, void *unused)
503 {
504 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
505 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
506 	int i;
507 
508 	mutex_lock(&power_domains->lock);
509 
510 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
511 	for (i = 0; i < power_domains->power_well_count; i++) {
512 		struct i915_power_well *power_well;
513 		enum intel_display_power_domain power_domain;
514 
515 		power_well = &power_domains->power_wells[i];
516 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
517 			   power_well->count);
518 
519 		for_each_power_domain(power_domain, power_well->desc->domains)
520 			seq_printf(m, "  %-23s %d\n",
521 				 intel_display_power_domain_str(power_domain),
522 				 power_domains->domain_use_count[power_domain]);
523 	}
524 
525 	mutex_unlock(&power_domains->lock);
526 
527 	return 0;
528 }
529 
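/*
 * Report DMC/CSR firmware load status, version and the DC-state entry
 * counters maintained by the firmware.
 */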
530 static int i915_dmc_info(struct seq_file *m, void *unused)
531 {
532 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
533 	intel_wakeref_t wakeref;
534 	struct intel_csr *csr;
535 	i915_reg_t dc5_reg, dc6_reg = {};
536 
537 	if (!HAS_CSR(dev_priv))
538 		return -ENODEV;
539 
540 	csr = &dev_priv->csr;
541 
542 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
543 
544 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
545 	seq_printf(m, "path: %s\n", csr->fw_path);
546 
547 	if (!csr->dmc_payload)
548 		goto out;
549 
550 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
551 		   CSR_VERSION_MINOR(csr->version));
552 
553 	if (DISPLAY_VER(dev_priv) >= 12) {
554 		if (IS_DGFX(dev_priv)) {
555 			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
556 		} else {
557 			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
558 			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
559 		}
560 
561 		/*
562 		 * NOTE: DMC_DEBUG3 is a general purpose reg.
563 		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
564 		 * reg for DC3CO debugging and validation,
565 		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
566 		 */
567 		seq_printf(m, "DC3CO count: %d\n",
568 			   intel_de_read(dev_priv, DMC_DEBUG3));
569 	} else {
570 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
571 						 SKL_CSR_DC3_DC5_COUNT;
572 		if (!IS_GEN9_LP(dev_priv))
573 			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
574 	}
575 
576 	seq_printf(m, "DC3 -> DC5 count: %d\n",
577 		   intel_de_read(dev_priv, dc5_reg));
578 	if (dc6_reg.reg)
579 		seq_printf(m, "DC5 -> DC6 count: %d\n",
580 			   intel_de_read(dev_priv, dc6_reg));
581 
582 out:
583 	seq_printf(m, "program base: 0x%08x\n",
584 		   intel_de_read(dev_priv, CSR_PROGRAM(0)));
585 	seq_printf(m, "ssp base: 0x%08x\n",
586 		   intel_de_read(dev_priv, CSR_SSP_BASE));
587 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
588 
589 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
590 
591 	return 0;
592 }
593 
594 static void intel_seq_print_mode(struct seq_file *m, int tabs,
595 				 const struct drm_display_mode *mode)
596 {
597 	int i;
598 
599 	for (i = 0; i < tabs; i++)
600 		seq_putc(m, '\t');
601 
602 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
603 }
604 
605 static void intel_encoder_info(struct seq_file *m,
606 			       struct intel_crtc *crtc,
607 			       struct intel_encoder *encoder)
608 {
609 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
610 	struct drm_connector_list_iter conn_iter;
611 	struct drm_connector *connector;
612 
613 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
614 		   encoder->base.base.id, encoder->base.name);
615 
616 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
617 	drm_for_each_connector_iter(connector, &conn_iter) {
618 		const struct drm_connector_state *conn_state =
619 			connector->state;
620 
621 		if (conn_state->best_encoder != &encoder->base)
622 			continue;
623 
624 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
625 			   connector->base.id, connector->name);
626 	}
627 	drm_connector_list_iter_end(&conn_iter);
628 }
629 
630 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
631 {
632 	const struct drm_display_mode *mode = panel->fixed_mode;
633 
634 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
635 }
636 
637 static void intel_hdcp_info(struct seq_file *m,
638 			    struct intel_connector *intel_connector)
639 {
640 	bool hdcp_cap, hdcp2_cap;
641 
642 	if (!intel_connector->hdcp.shim) {
643 		seq_puts(m, "No Connector Support");
644 		goto out;
645 	}
646 
647 	hdcp_cap = intel_hdcp_capable(intel_connector);
648 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
649 
650 	if (hdcp_cap)
651 		seq_puts(m, "HDCP1.4 ");
652 	if (hdcp2_cap)
653 		seq_puts(m, "HDCP2.2 ");
654 
655 	if (!hdcp_cap && !hdcp2_cap)
656 		seq_puts(m, "None");
657 
658 out:
659 	seq_puts(m, "\n");
660 }
661 
662 static void intel_dp_info(struct seq_file *m,
663 			  struct intel_connector *intel_connector)
664 {
665 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
666 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
667 	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
668 
669 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
670 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
671 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
672 		intel_panel_info(m, &intel_connector->panel);
673 
674 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
675 				edid ? edid->data : NULL, &intel_dp->aux);
676 }
677 
678 static void intel_dp_mst_info(struct seq_file *m,
679 			      struct intel_connector *intel_connector)
680 {
681 	bool has_audio = intel_connector->port->has_audio;
682 
683 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
684 }
685 
686 static void intel_hdmi_info(struct seq_file *m,
687 			    struct intel_connector *intel_connector)
688 {
689 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
690 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
691 
692 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
693 }
694 
695 static void intel_lvds_info(struct seq_file *m,
696 			    struct intel_connector *intel_connector)
697 {
698 	intel_panel_info(m, &intel_connector->panel);
699 }
700 
701 static void intel_connector_info(struct seq_file *m,
702 				 struct drm_connector *connector)
703 {
704 	struct intel_connector *intel_connector = to_intel_connector(connector);
705 	const struct drm_connector_state *conn_state = connector->state;
706 	struct intel_encoder *encoder =
707 		to_intel_encoder(conn_state->best_encoder);
708 	const struct drm_display_mode *mode;
709 
710 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
711 		   connector->base.id, connector->name,
712 		   drm_get_connector_status_name(connector->status));
713 
714 	if (connector->status == connector_status_disconnected)
715 		return;
716 
717 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
718 		   connector->display_info.width_mm,
719 		   connector->display_info.height_mm);
720 	seq_printf(m, "\tsubpixel order: %s\n",
721 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
722 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
723 
724 	if (!encoder)
725 		return;
726 
727 	switch (connector->connector_type) {
728 	case DRM_MODE_CONNECTOR_DisplayPort:
729 	case DRM_MODE_CONNECTOR_eDP:
730 		if (encoder->type == INTEL_OUTPUT_DP_MST)
731 			intel_dp_mst_info(m, intel_connector);
732 		else
733 			intel_dp_info(m, intel_connector);
734 		break;
735 	case DRM_MODE_CONNECTOR_LVDS:
736 		if (encoder->type == INTEL_OUTPUT_LVDS)
737 			intel_lvds_info(m, intel_connector);
738 		break;
739 	case DRM_MODE_CONNECTOR_HDMIA:
740 		if (encoder->type == INTEL_OUTPUT_HDMI ||
741 		    encoder->type == INTEL_OUTPUT_DDI)
742 			intel_hdmi_info(m, intel_connector);
743 		break;
744 	default:
745 		break;
746 	}
747 
748 	seq_puts(m, "\tHDCP version: ");
749 	intel_hdcp_info(m, intel_connector);
750 
751 	seq_printf(m, "\tmodes:\n");
752 	list_for_each_entry(mode, &connector->modes, head)
753 		intel_seq_print_mode(m, 2, mode);
754 }
755 
756 static const char *plane_type(enum drm_plane_type type)
757 {
758 	switch (type) {
759 	case DRM_PLANE_TYPE_OVERLAY:
760 		return "OVL";
761 	case DRM_PLANE_TYPE_PRIMARY:
762 		return "PRI";
763 	case DRM_PLANE_TYPE_CURSOR:
764 		return "CUR";
765 	/*
766 	 * Deliberately omitting default: to generate compiler warnings
767 	 * when a new drm_plane_type gets added.
768 	 */
769 	}
770 
771 	return "unknown";
772 }
773 
774 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
775 {
776 	/*
777 	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
778 	 * will print them all to visualize if the values are misused
779 	 */
780 	snprintf(buf, bufsize,
781 		 "%s%s%s%s%s%s(0x%08x)",
782 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
783 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
784 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
785 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
786 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
787 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
788 		 rotation);
789 }
790 
791 static const char *plane_visibility(const struct intel_plane_state *plane_state)
792 {
793 	if (plane_state->uapi.visible)
794 		return "visible";
795 
796 	if (plane_state->planar_slave)
797 		return "planar-slave";
798 
799 	return "hidden";
800 }
801 
802 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
803 {
804 	const struct intel_plane_state *plane_state =
805 		to_intel_plane_state(plane->base.state);
806 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
807 	struct drm_rect src, dst;
808 	char rot_str[48];
809 
810 	src = drm_plane_state_src(&plane_state->uapi);
811 	dst = drm_plane_state_dest(&plane_state->uapi);
812 
813 	plane_rotation(rot_str, sizeof(rot_str),
814 		       plane_state->uapi.rotation);
815 
816 	seq_puts(m, "\t\tuapi: [FB:");
817 	if (fb)
818 		seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
819 			   &fb->format->format, fb->modifier, fb->width,
820 			   fb->height);
821 	else
822 		seq_puts(m, "0] n/a,0x0,0x0,");
823 	seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
824 		   ", rotation=%s\n", plane_visibility(plane_state),
825 		   DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);
826 
827 	if (plane_state->planar_linked_plane)
828 		seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
829 			   plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
830 			   plane_state->planar_slave ? "slave" : "master");
831 }
832 
833 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
834 {
835 	const struct intel_plane_state *plane_state =
836 		to_intel_plane_state(plane->base.state);
837 	const struct drm_framebuffer *fb = plane_state->hw.fb;
838 	char rot_str[48];
839 
840 	if (!fb)
841 		return;
842 
843 	plane_rotation(rot_str, sizeof(rot_str),
844 		       plane_state->hw.rotation);
845 
846 	seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
847 		   DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
848 		   fb->base.id, &fb->format->format,
849 		   fb->modifier, fb->width, fb->height,
850 		   yesno(plane_state->uapi.visible),
851 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
852 		   DRM_RECT_ARG(&plane_state->uapi.dst),
853 		   rot_str);
854 }
855 
856 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
857 {
858 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
859 	struct intel_plane *plane;
860 
861 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
862 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
863 			   plane->base.base.id, plane->base.name,
864 			   plane_type(plane->base.type));
865 		intel_plane_uapi_info(m, plane);
866 		intel_plane_hw_info(m, plane);
867 	}
868 }
869 
870 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
871 {
872 	const struct intel_crtc_state *crtc_state =
873 		to_intel_crtc_state(crtc->base.state);
874 	int num_scalers = crtc->num_scalers;
875 	int i;
876 
	/* Not all platforms have a scaler */
878 	if (num_scalers) {
879 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
880 			   num_scalers,
881 			   crtc_state->scaler_state.scaler_users,
882 			   crtc_state->scaler_state.scaler_id);
883 
884 		for (i = 0; i < num_scalers; i++) {
885 			const struct intel_scaler *sc =
886 				&crtc_state->scaler_state.scalers[i];
887 
888 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
889 				   i, yesno(sc->in_use), sc->mode);
890 		}
891 		seq_puts(m, "\n");
892 	} else {
893 		seq_puts(m, "\tNo scalers available on this platform\n");
894 	}
895 }
896 
897 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
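/*
 * Print a log2-bucketed histogram of vblank-evade update durations for
 * the crtc, plus the min/max/average update time and the overrun count.
 */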
898 static void crtc_updates_info(struct seq_file *m,
899 			      struct intel_crtc *crtc,
900 			      const char *hdr)
901 {
902 	u64 count;
903 	int row;
904 
905 	count = 0;
906 	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
907 		count += crtc->debug.vbl.times[row];
908 	seq_printf(m, "%sUpdates: %llu\n", hdr, count);
909 	if (!count)
910 		return;
911 
912 	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
913 		char columns[80] = "       |";
914 		unsigned int x;
915 
916 		if (row & 1) {
917 			const char *units;
918 
919 			if (row > 10) {
920 				x = 1000000;
921 				units = "ms";
922 			} else {
923 				x = 1000;
924 				units = "us";
925 			}
926 
927 			snprintf(columns, sizeof(columns), "%4ld%s |",
928 				 DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
929 		}
930 
931 		if (crtc->debug.vbl.times[row]) {
932 			x = ilog2(crtc->debug.vbl.times[row]);
933 			memset(columns + 8, '*', x);
934 			columns[8 + x] = '\0';
935 		}
936 
937 		seq_printf(m, "%s%s\n", hdr, columns);
938 	}
939 
940 	seq_printf(m, "%sMin update: %lluns\n",
941 		   hdr, crtc->debug.vbl.min);
942 	seq_printf(m, "%sMax update: %lluns\n",
943 		   hdr, crtc->debug.vbl.max);
944 	seq_printf(m, "%sAverage update: %lluns\n",
945 		   hdr, div64_u64(crtc->debug.vbl.sum,  count));
946 	seq_printf(m, "%sOverruns > %uus: %u\n",
947 		   hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
948 }
949 
950 static int crtc_updates_show(struct seq_file *m, void *data)
951 {
952 	crtc_updates_info(m, m->private, "");
953 	return 0;
954 }
955 
956 static int crtc_updates_open(struct inode *inode, struct file *file)
957 {
958 	return single_open(file, crtc_updates_show, inode->i_private);
959 }
960 
961 static ssize_t crtc_updates_write(struct file *file,
962 				  const char __user *ubuf,
963 				  size_t len, loff_t *offp)
964 {
965 	struct seq_file *m = file->private_data;
966 	struct intel_crtc *crtc = m->private;
967 
968 	/* May race with an update. Meh. */
969 	memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
970 
971 	return len;
972 }
973 
974 static const struct file_operations crtc_updates_fops = {
975 	.owner = THIS_MODULE,
976 	.open = crtc_updates_open,
977 	.read = seq_read,
978 	.llseek = seq_lseek,
979 	.release = single_release,
980 	.write = crtc_updates_write
981 };
982 
983 static void crtc_updates_add(struct drm_crtc *crtc)
984 {
985 	debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
986 			    to_intel_crtc(crtc), &crtc_updates_fops);
987 }
988 
989 #else
990 static void crtc_updates_info(struct seq_file *m,
991 			      struct intel_crtc *crtc,
992 			      const char *hdr)
993 {
994 }
995 
996 static void crtc_updates_add(struct drm_crtc *crtc)
997 {
998 }
999 #endif
1000 
1001 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
1002 {
1003 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1004 	const struct intel_crtc_state *crtc_state =
1005 		to_intel_crtc_state(crtc->base.state);
1006 	struct intel_encoder *encoder;
1007 
1008 	seq_printf(m, "[CRTC:%d:%s]:\n",
1009 		   crtc->base.base.id, crtc->base.name);
1010 
1011 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
1012 		   yesno(crtc_state->uapi.enable),
1013 		   yesno(crtc_state->uapi.active),
1014 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
1015 
1016 	if (crtc_state->hw.enable) {
1017 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
1018 			   yesno(crtc_state->hw.active),
1019 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
1020 
1021 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
1022 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
1023 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
1024 
1025 		intel_scaler_info(m, crtc);
1026 	}
1027 
1028 	if (crtc_state->bigjoiner)
1029 		seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
1030 			   crtc_state->bigjoiner_linked_crtc->base.base.id,
1031 			   crtc_state->bigjoiner_linked_crtc->base.name,
1032 			   crtc_state->bigjoiner_slave ? "slave" : "master");
1033 
1034 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
1035 				    crtc_state->uapi.encoder_mask)
1036 		intel_encoder_info(m, crtc, encoder);
1037 
1038 	intel_plane_info(m, crtc);
1039 
1040 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
1041 		   yesno(!crtc->cpu_fifo_underrun_disabled),
1042 		   yesno(!crtc->pch_fifo_underrun_disabled));
1043 
1044 	crtc_updates_info(m, crtc, "\t");
1045 }
1046 
1047 static int i915_display_info(struct seq_file *m, void *unused)
1048 {
1049 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1050 	struct drm_device *dev = &dev_priv->drm;
1051 	struct intel_crtc *crtc;
1052 	struct drm_connector *connector;
1053 	struct drm_connector_list_iter conn_iter;
1054 	intel_wakeref_t wakeref;
1055 
1056 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1057 
1058 	drm_modeset_lock_all(dev);
1059 
1060 	seq_printf(m, "CRTC info\n");
1061 	seq_printf(m, "---------\n");
1062 	for_each_intel_crtc(dev, crtc)
1063 		intel_crtc_info(m, crtc);
1064 
1065 	seq_printf(m, "\n");
1066 	seq_printf(m, "Connector info\n");
1067 	seq_printf(m, "--------------\n");
1068 	drm_connector_list_iter_begin(dev, &conn_iter);
1069 	drm_for_each_connector_iter(connector, &conn_iter)
1070 		intel_connector_info(m, connector);
1071 	drm_connector_list_iter_end(&conn_iter);
1072 
1073 	drm_modeset_unlock_all(dev);
1074 
1075 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1076 
1077 	return 0;
1078 }
1079 
1080 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
1081 {
1082 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1083 	struct drm_device *dev = &dev_priv->drm;
1084 	int i;
1085 
1086 	drm_modeset_lock_all(dev);
1087 
1088 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
1089 		   dev_priv->dpll.ref_clks.nssc,
1090 		   dev_priv->dpll.ref_clks.ssc);
1091 
1092 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
1093 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
1094 
1095 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
1096 			   pll->info->id);
1097 		seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
1098 			   pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
1099 		seq_printf(m, " tracked hardware state:\n");
1100 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
1101 		seq_printf(m, " dpll_md: 0x%08x\n",
1102 			   pll->state.hw_state.dpll_md);
1103 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
1104 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
1105 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
1106 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
1107 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
1108 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
1109 			   pll->state.hw_state.mg_refclkin_ctl);
1110 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
1111 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
1112 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
1113 			   pll->state.hw_state.mg_clktop2_hsclkctl);
1114 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
1115 			   pll->state.hw_state.mg_pll_div0);
1116 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
1117 			   pll->state.hw_state.mg_pll_div1);
1118 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
1119 			   pll->state.hw_state.mg_pll_lf);
1120 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
1121 			   pll->state.hw_state.mg_pll_frac_lock);
1122 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
1123 			   pll->state.hw_state.mg_pll_ssc);
1124 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
1125 			   pll->state.hw_state.mg_pll_bias);
1126 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
1127 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
1128 	}
1129 	drm_modeset_unlock_all(dev);
1130 
1131 	return 0;
1132 }
1133 
1134 static int i915_ipc_status_show(struct seq_file *m, void *data)
1135 {
1136 	struct drm_i915_private *dev_priv = m->private;
1137 
1138 	seq_printf(m, "Isochronous Priority Control: %s\n",
1139 			yesno(dev_priv->ipc_enabled));
1140 	return 0;
1141 }
1142 
1143 static int i915_ipc_status_open(struct inode *inode, struct file *file)
1144 {
1145 	struct drm_i915_private *dev_priv = inode->i_private;
1146 
1147 	if (!HAS_IPC(dev_priv))
1148 		return -ENODEV;
1149 
1150 	return single_open(file, i915_ipc_status_show, dev_priv);
1151 }
1152 
1153 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
1154 				     size_t len, loff_t *offp)
1155 {
1156 	struct seq_file *m = file->private_data;
1157 	struct drm_i915_private *dev_priv = m->private;
1158 	intel_wakeref_t wakeref;
1159 	bool enable;
1160 	int ret;
1161 
1162 	ret = kstrtobool_from_user(ubuf, len, &enable);
1163 	if (ret < 0)
1164 		return ret;
1165 
1166 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1167 		if (!dev_priv->ipc_enabled && enable)
1168 			drm_info(&dev_priv->drm,
1169 				 "Enabling IPC: WM will be proper only after next commit\n");
1170 		dev_priv->ipc_enabled = enable;
1171 		intel_enable_ipc(dev_priv);
1172 	}
1173 
1174 	return len;
1175 }
1176 
1177 static const struct file_operations i915_ipc_status_fops = {
1178 	.owner = THIS_MODULE,
1179 	.open = i915_ipc_status_open,
1180 	.read = seq_read,
1181 	.llseek = seq_lseek,
1182 	.release = single_release,
1183 	.write = i915_ipc_status_write
1184 };
1185 
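/* Dump the SKL+ per-plane DDB (display data buffer) allocations for each pipe. */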
1186 static int i915_ddb_info(struct seq_file *m, void *unused)
1187 {
1188 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1189 	struct drm_device *dev = &dev_priv->drm;
1190 	struct skl_ddb_entry *entry;
1191 	struct intel_crtc *crtc;
1192 
1193 	if (DISPLAY_VER(dev_priv) < 9)
1194 		return -ENODEV;
1195 
1196 	drm_modeset_lock_all(dev);
1197 
1198 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1199 
1200 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1201 		struct intel_crtc_state *crtc_state =
1202 			to_intel_crtc_state(crtc->base.state);
1203 		enum pipe pipe = crtc->pipe;
1204 		enum plane_id plane_id;
1205 
1206 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1207 
1208 		for_each_plane_id_on_crtc(crtc, plane_id) {
1209 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1210 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1211 				   entry->start, entry->end,
1212 				   skl_ddb_entry_size(entry));
1213 		}
1214 
1215 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1216 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1217 			   entry->end, skl_ddb_entry_size(entry));
1218 	}
1219 
1220 	drm_modeset_unlock_all(dev);
1221 
1222 	return 0;
1223 }
1224 
1225 static void drrs_status_per_crtc(struct seq_file *m,
1226 				 struct drm_device *dev,
1227 				 struct intel_crtc *intel_crtc)
1228 {
1229 	struct drm_i915_private *dev_priv = to_i915(dev);
1230 	struct i915_drrs *drrs = &dev_priv->drrs;
1231 	int vrefresh = 0;
1232 	struct drm_connector *connector;
1233 	struct drm_connector_list_iter conn_iter;
1234 
1235 	drm_connector_list_iter_begin(dev, &conn_iter);
1236 	drm_for_each_connector_iter(connector, &conn_iter) {
1237 		bool supported = false;
1238 
1239 		if (connector->state->crtc != &intel_crtc->base)
1240 			continue;
1241 
1242 		seq_printf(m, "%s:\n", connector->name);
1243 
1244 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
1245 		    drrs->type == SEAMLESS_DRRS_SUPPORT)
1246 			supported = true;
1247 
1248 		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
1249 	}
1250 	drm_connector_list_iter_end(&conn_iter);
1251 
1252 	seq_puts(m, "\n");
1253 
1254 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
1255 		struct intel_panel *panel;
1256 
1257 		mutex_lock(&drrs->mutex);
1258 		/* DRRS Supported */
1259 		seq_puts(m, "\tDRRS Enabled: Yes\n");
1260 
1261 		/* disable_drrs() will make drrs->dp NULL */
1262 		if (!drrs->dp) {
1263 			seq_puts(m, "Idleness DRRS: Disabled\n");
1264 			mutex_unlock(&drrs->mutex);
1265 			return;
1266 		}
1267 
1268 		panel = &drrs->dp->attached_connector->panel;
1269 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1270 					drrs->busy_frontbuffer_bits);
1271 
1272 		seq_puts(m, "\n\t\t");
1273 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1274 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1275 			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1276 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1277 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1278 			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1279 		} else {
1280 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1281 						drrs->refresh_rate_type);
1282 			mutex_unlock(&drrs->mutex);
1283 			return;
1284 		}
1285 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1286 
1287 		seq_puts(m, "\n\t\t");
1288 		mutex_unlock(&drrs->mutex);
1289 	} else {
		/* DRRS not supported. Print the VBT parameter */
1291 		seq_puts(m, "\tDRRS Enabled : No");
1292 	}
1293 	seq_puts(m, "\n");
1294 }
1295 
1296 static int i915_drrs_status(struct seq_file *m, void *unused)
1297 {
1298 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1299 	struct drm_device *dev = &dev_priv->drm;
1300 	struct intel_crtc *intel_crtc;
1301 	int active_crtc_cnt = 0;
1302 
1303 	drm_modeset_lock_all(dev);
1304 	for_each_intel_crtc(dev, intel_crtc) {
1305 		if (intel_crtc->base.state->active) {
1306 			active_crtc_cnt++;
1307 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1308 
1309 			drrs_status_per_crtc(m, dev, intel_crtc);
1310 		}
1311 	}
1312 	drm_modeset_unlock_all(dev);
1313 
1314 	if (!active_crtc_cnt)
1315 		seq_puts(m, "No active crtc found\n");
1316 
1317 	return 0;
1318 }
1319 
#define LPSP_STATUS(COND) ((COND) ? seq_puts(m, "LPSP: enabled\n") : \
				    seq_puts(m, "LPSP: disabled\n"))
1322 
1323 static bool
1324 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1325 			      enum i915_power_well_id power_well_id)
1326 {
1327 	intel_wakeref_t wakeref;
1328 	bool is_enabled;
1329 
1330 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1331 	is_enabled = intel_display_power_well_is_enabled(i915,
1332 							 power_well_id);
1333 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1334 
1335 	return is_enabled;
1336 }
1337 
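/*
 * LPSP (low power single pipe) is reported as enabled when the relevant
 * non-LPSP power well for the platform is powered down.
 */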
1338 static int i915_lpsp_status(struct seq_file *m, void *unused)
1339 {
1340 	struct drm_i915_private *i915 = node_to_i915(m->private);
1341 
1342 	switch (DISPLAY_VER(i915)) {
1343 	case 12:
1344 	case 11:
1345 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1346 		break;
1347 	case 10:
1348 	case 9:
1349 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1350 		break;
1351 	default:
1352 		/*
1353 		 * Apart from HASWELL/BROADWELL other legacy platform doesn't
1354 		 * support lpsp.
1355 		 */
1356 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1357 			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1358 		else
1359 			seq_puts(m, "LPSP: not supported\n");
1360 	}
1361 
1362 	return 0;
1363 }
1364 
1365 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1366 {
1367 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1368 	struct drm_device *dev = &dev_priv->drm;
1369 	struct intel_encoder *intel_encoder;
1370 	struct intel_digital_port *dig_port;
1371 	struct drm_connector *connector;
1372 	struct drm_connector_list_iter conn_iter;
1373 
1374 	drm_connector_list_iter_begin(dev, &conn_iter);
1375 	drm_for_each_connector_iter(connector, &conn_iter) {
1376 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1377 			continue;
1378 
1379 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1380 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1381 			continue;
1382 
1383 		dig_port = enc_to_dig_port(intel_encoder);
1384 		if (!dig_port->dp.can_mst)
1385 			continue;
1386 
1387 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1388 			   dig_port->base.base.base.id,
1389 			   dig_port->base.base.name);
1390 		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1391 	}
1392 	drm_connector_list_iter_end(&conn_iter);
1393 
1394 	return 0;
1395 }
1396 
1397 static ssize_t i915_displayport_test_active_write(struct file *file,
1398 						  const char __user *ubuf,
1399 						  size_t len, loff_t *offp)
1400 {
1401 	char *input_buffer;
1402 	int status = 0;
1403 	struct drm_device *dev;
1404 	struct drm_connector *connector;
1405 	struct drm_connector_list_iter conn_iter;
1406 	struct intel_dp *intel_dp;
1407 	int val = 0;
1408 
1409 	dev = ((struct seq_file *)file->private_data)->private;
1410 
1411 	if (len == 0)
1412 		return 0;
1413 
1414 	input_buffer = memdup_user_nul(ubuf, len);
1415 	if (IS_ERR(input_buffer))
1416 		return PTR_ERR(input_buffer);
1417 
1418 	drm_dbg(&to_i915(dev)->drm,
1419 		"Copied %d bytes from user\n", (unsigned int)len);
1420 
1421 	drm_connector_list_iter_begin(dev, &conn_iter);
1422 	drm_for_each_connector_iter(connector, &conn_iter) {
1423 		struct intel_encoder *encoder;
1424 
1425 		if (connector->connector_type !=
1426 		    DRM_MODE_CONNECTOR_DisplayPort)
1427 			continue;
1428 
1429 		encoder = to_intel_encoder(connector->encoder);
1430 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1431 			continue;
1432 
1433 		if (encoder && connector->status == connector_status_connected) {
1434 			intel_dp = enc_to_intel_dp(encoder);
1435 			status = kstrtoint(input_buffer, 10, &val);
1436 			if (status < 0)
1437 				break;
1438 			drm_dbg(&to_i915(dev)->drm,
1439 				"Got %d for test active\n", val);
			/*
			 * To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
1443 			if (val == 1)
1444 				intel_dp->compliance.test_active = true;
1445 			else
1446 				intel_dp->compliance.test_active = false;
1447 		}
1448 	}
1449 	drm_connector_list_iter_end(&conn_iter);
1450 	kfree(input_buffer);
1451 	if (status < 0)
1452 		return status;
1453 
1454 	*offp += len;
1455 	return len;
1456 }
1457 
1458 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1459 {
1460 	struct drm_i915_private *dev_priv = m->private;
1461 	struct drm_device *dev = &dev_priv->drm;
1462 	struct drm_connector *connector;
1463 	struct drm_connector_list_iter conn_iter;
1464 	struct intel_dp *intel_dp;
1465 
1466 	drm_connector_list_iter_begin(dev, &conn_iter);
1467 	drm_for_each_connector_iter(connector, &conn_iter) {
1468 		struct intel_encoder *encoder;
1469 
1470 		if (connector->connector_type !=
1471 		    DRM_MODE_CONNECTOR_DisplayPort)
1472 			continue;
1473 
1474 		encoder = to_intel_encoder(connector->encoder);
1475 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1476 			continue;
1477 
1478 		if (encoder && connector->status == connector_status_connected) {
1479 			intel_dp = enc_to_intel_dp(encoder);
1480 			if (intel_dp->compliance.test_active)
1481 				seq_puts(m, "1");
1482 			else
1483 				seq_puts(m, "0");
1484 		} else
1485 			seq_puts(m, "0");
1486 	}
1487 	drm_connector_list_iter_end(&conn_iter);
1488 
1489 	return 0;
1490 }
1491 
1492 static int i915_displayport_test_active_open(struct inode *inode,
1493 					     struct file *file)
1494 {
1495 	return single_open(file, i915_displayport_test_active_show,
1496 			   inode->i_private);
1497 }
1498 
1499 static const struct file_operations i915_displayport_test_active_fops = {
1500 	.owner = THIS_MODULE,
1501 	.open = i915_displayport_test_active_open,
1502 	.read = seq_read,
1503 	.llseek = seq_lseek,
1504 	.release = single_release,
1505 	.write = i915_displayport_test_active_write
1506 };
1507 
1508 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1509 {
1510 	struct drm_i915_private *dev_priv = m->private;
1511 	struct drm_device *dev = &dev_priv->drm;
1512 	struct drm_connector *connector;
1513 	struct drm_connector_list_iter conn_iter;
1514 	struct intel_dp *intel_dp;
1515 
1516 	drm_connector_list_iter_begin(dev, &conn_iter);
1517 	drm_for_each_connector_iter(connector, &conn_iter) {
1518 		struct intel_encoder *encoder;
1519 
1520 		if (connector->connector_type !=
1521 		    DRM_MODE_CONNECTOR_DisplayPort)
1522 			continue;
1523 
1524 		encoder = to_intel_encoder(connector->encoder);
1525 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1526 			continue;
1527 
1528 		if (encoder && connector->status == connector_status_connected) {
1529 			intel_dp = enc_to_intel_dp(encoder);
1530 			if (intel_dp->compliance.test_type ==
1531 			    DP_TEST_LINK_EDID_READ)
1532 				seq_printf(m, "%lx",
1533 					   intel_dp->compliance.test_data.edid);
1534 			else if (intel_dp->compliance.test_type ==
1535 				 DP_TEST_LINK_VIDEO_PATTERN) {
1536 				seq_printf(m, "hdisplay: %d\n",
1537 					   intel_dp->compliance.test_data.hdisplay);
1538 				seq_printf(m, "vdisplay: %d\n",
1539 					   intel_dp->compliance.test_data.vdisplay);
1540 				seq_printf(m, "bpc: %u\n",
1541 					   intel_dp->compliance.test_data.bpc);
1542 			} else if (intel_dp->compliance.test_type ==
1543 				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1544 				seq_printf(m, "pattern: %d\n",
1545 					   intel_dp->compliance.test_data.phytest.phy_pattern);
1546 				seq_printf(m, "Number of lanes: %d\n",
1547 					   intel_dp->compliance.test_data.phytest.num_lanes);
1548 				seq_printf(m, "Link Rate: %d\n",
1549 					   intel_dp->compliance.test_data.phytest.link_rate);
1550 				seq_printf(m, "level: %02x\n",
1551 					   intel_dp->train_set[0]);
1552 			}
1553 		} else
1554 			seq_puts(m, "0");
1555 	}
1556 	drm_connector_list_iter_end(&conn_iter);
1557 
1558 	return 0;
1559 }
1560 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1561 
1562 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1563 {
1564 	struct drm_i915_private *dev_priv = m->private;
1565 	struct drm_device *dev = &dev_priv->drm;
1566 	struct drm_connector *connector;
1567 	struct drm_connector_list_iter conn_iter;
1568 	struct intel_dp *intel_dp;
1569 
1570 	drm_connector_list_iter_begin(dev, &conn_iter);
1571 	drm_for_each_connector_iter(connector, &conn_iter) {
1572 		struct intel_encoder *encoder;
1573 
1574 		if (connector->connector_type !=
1575 		    DRM_MODE_CONNECTOR_DisplayPort)
1576 			continue;
1577 
1578 		encoder = to_intel_encoder(connector->encoder);
1579 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1580 			continue;
1581 
1582 		if (encoder && connector->status == connector_status_connected) {
1583 			intel_dp = enc_to_intel_dp(encoder);
1584 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1585 		} else
1586 			seq_puts(m, "0");
1587 	}
1588 	drm_connector_list_iter_end(&conn_iter);
1589 
1590 	return 0;
1591 }
1592 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1593 
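/*
 * Print one line per watermark level; the number of levels and the raw
 * latency units are platform dependent, so values are scaled to usec
 * for display.
 */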
1594 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1595 {
1596 	struct drm_i915_private *dev_priv = m->private;
1597 	struct drm_device *dev = &dev_priv->drm;
1598 	int level;
1599 	int num_levels;
1600 
1601 	if (IS_CHERRYVIEW(dev_priv))
1602 		num_levels = 3;
1603 	else if (IS_VALLEYVIEW(dev_priv))
1604 		num_levels = 1;
1605 	else if (IS_G4X(dev_priv))
1606 		num_levels = 3;
1607 	else
1608 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1609 
1610 	drm_modeset_lock_all(dev);
1611 
1612 	for (level = 0; level < num_levels; level++) {
1613 		unsigned int latency = wm[level];
1614 
1615 		/*
1616 		 * - WM1+ latency values in 0.5us units
1617 		 * - latencies are in us on gen9/vlv/chv
1618 		 */
1619 		if (DISPLAY_VER(dev_priv) >= 9 ||
1620 		    IS_VALLEYVIEW(dev_priv) ||
1621 		    IS_CHERRYVIEW(dev_priv) ||
1622 		    IS_G4X(dev_priv))
1623 			latency *= 10;
1624 		else if (level > 0)
1625 			latency *= 5;
1626 
1627 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1628 			   level, wm[level], latency / 10, latency % 10);
1629 	}
1630 
1631 	drm_modeset_unlock_all(dev);
1632 }
1633 
1634 static int pri_wm_latency_show(struct seq_file *m, void *data)
1635 {
1636 	struct drm_i915_private *dev_priv = m->private;
1637 	const u16 *latencies;
1638 
1639 	if (DISPLAY_VER(dev_priv) >= 9)
1640 		latencies = dev_priv->wm.skl_latency;
1641 	else
1642 		latencies = dev_priv->wm.pri_latency;
1643 
1644 	wm_latency_show(m, latencies);
1645 
1646 	return 0;
1647 }
1648 
1649 static int spr_wm_latency_show(struct seq_file *m, void *data)
1650 {
1651 	struct drm_i915_private *dev_priv = m->private;
1652 	const u16 *latencies;
1653 
1654 	if (DISPLAY_VER(dev_priv) >= 9)
1655 		latencies = dev_priv->wm.skl_latency;
1656 	else
1657 		latencies = dev_priv->wm.spr_latency;
1658 
1659 	wm_latency_show(m, latencies);
1660 
1661 	return 0;
1662 }
1663 
1664 static int cur_wm_latency_show(struct seq_file *m, void *data)
1665 {
1666 	struct drm_i915_private *dev_priv = m->private;
1667 	const u16 *latencies;
1668 
1669 	if (DISPLAY_VER(dev_priv) >= 9)
1670 		latencies = dev_priv->wm.skl_latency;
1671 	else
1672 		latencies = dev_priv->wm.cur_latency;
1673 
1674 	wm_latency_show(m, latencies);
1675 
1676 	return 0;
1677 }
1678 
1679 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1680 {
1681 	struct drm_i915_private *dev_priv = inode->i_private;
1682 
1683 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
1684 		return -ENODEV;
1685 
1686 	return single_open(file, pri_wm_latency_show, dev_priv);
1687 }
1688 
1689 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1690 {
1691 	struct drm_i915_private *dev_priv = inode->i_private;
1692 
1693 	if (HAS_GMCH(dev_priv))
1694 		return -ENODEV;
1695 
1696 	return single_open(file, spr_wm_latency_show, dev_priv);
1697 }
1698 
1699 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1700 {
1701 	struct drm_i915_private *dev_priv = inode->i_private;
1702 
1703 	if (HAS_GMCH(dev_priv))
1704 		return -ENODEV;
1705 
1706 	return single_open(file, cur_wm_latency_show, dev_priv);
1707 }
1708 
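/*
 * Parse space-separated latency values from userspace; the count must
 * match the platform's number of watermark levels.
 */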
1709 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1710 				size_t len, loff_t *offp, u16 wm[8])
1711 {
1712 	struct seq_file *m = file->private_data;
1713 	struct drm_i915_private *dev_priv = m->private;
1714 	struct drm_device *dev = &dev_priv->drm;
1715 	u16 new[8] = { 0 };
1716 	int num_levels;
1717 	int level;
1718 	int ret;
1719 	char tmp[32];
1720 
1721 	if (IS_CHERRYVIEW(dev_priv))
1722 		num_levels = 3;
1723 	else if (IS_VALLEYVIEW(dev_priv))
1724 		num_levels = 1;
1725 	else if (IS_G4X(dev_priv))
1726 		num_levels = 3;
1727 	else
1728 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1729 
1730 	if (len >= sizeof(tmp))
1731 		return -EINVAL;
1732 
1733 	if (copy_from_user(tmp, ubuf, len))
1734 		return -EFAULT;
1735 
1736 	tmp[len] = '\0';
1737 
1738 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1739 		     &new[0], &new[1], &new[2], &new[3],
1740 		     &new[4], &new[5], &new[6], &new[7]);
1741 	if (ret != num_levels)
1742 		return -EINVAL;
1743 
1744 	drm_modeset_lock_all(dev);
1745 
1746 	for (level = 0; level < num_levels; level++)
1747 		wm[level] = new[level];
1748 
1749 	drm_modeset_unlock_all(dev);
1750 
1751 	return len;
1752 }
1753 
1754 
1755 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1756 				    size_t len, loff_t *offp)
1757 {
1758 	struct seq_file *m = file->private_data;
1759 	struct drm_i915_private *dev_priv = m->private;
1760 	u16 *latencies;
1761 
1762 	if (DISPLAY_VER(dev_priv) >= 9)
1763 		latencies = dev_priv->wm.skl_latency;
1764 	else
1765 		latencies = dev_priv->wm.pri_latency;
1766 
1767 	return wm_latency_write(file, ubuf, len, offp, latencies);
1768 }
1769 
1770 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1771 				    size_t len, loff_t *offp)
1772 {
1773 	struct seq_file *m = file->private_data;
1774 	struct drm_i915_private *dev_priv = m->private;
1775 	u16 *latencies;
1776 
1777 	if (DISPLAY_VER(dev_priv) >= 9)
1778 		latencies = dev_priv->wm.skl_latency;
1779 	else
1780 		latencies = dev_priv->wm.spr_latency;
1781 
1782 	return wm_latency_write(file, ubuf, len, offp, latencies);
1783 }
1784 
1785 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1786 				    size_t len, loff_t *offp)
1787 {
1788 	struct seq_file *m = file->private_data;
1789 	struct drm_i915_private *dev_priv = m->private;
1790 	u16 *latencies;
1791 
1792 	if (DISPLAY_VER(dev_priv) >= 9)
1793 		latencies = dev_priv->wm.skl_latency;
1794 	else
1795 		latencies = dev_priv->wm.cur_latency;
1796 
1797 	return wm_latency_write(file, ubuf, len, offp, latencies);
1798 }
1799 
1800 static const struct file_operations i915_pri_wm_latency_fops = {
1801 	.owner = THIS_MODULE,
1802 	.open = pri_wm_latency_open,
1803 	.read = seq_read,
1804 	.llseek = seq_lseek,
1805 	.release = single_release,
1806 	.write = pri_wm_latency_write
1807 };
1808 
1809 static const struct file_operations i915_spr_wm_latency_fops = {
1810 	.owner = THIS_MODULE,
1811 	.open = spr_wm_latency_open,
1812 	.read = seq_read,
1813 	.llseek = seq_lseek,
1814 	.release = single_release,
1815 	.write = spr_wm_latency_write
1816 };
1817 
1818 static const struct file_operations i915_cur_wm_latency_fops = {
1819 	.owner = THIS_MODULE,
1820 	.open = cur_wm_latency_open,
1821 	.read = seq_read,
1822 	.llseek = seq_lseek,
1823 	.release = single_release,
1824 	.write = cur_wm_latency_write
1825 };
1826 
1827 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1828 {
1829 	struct drm_i915_private *dev_priv = m->private;
1830 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1831 
1832 	/* Synchronize with everything first in case there's been an HPD
1833 	 * storm, but we haven't finished handling it in the kernel yet
1834 	 */
1835 	intel_synchronize_irq(dev_priv);
1836 	flush_work(&dev_priv->hotplug.dig_port_work);
1837 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1838 
1839 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1840 	seq_printf(m, "Detected: %s\n",
1841 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1842 
1843 	return 0;
1844 }
1845 
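/*
 * Accepts either a decimal threshold (0 disables HPD storm detection) or
 * the string "reset" to restore HPD_STORM_DEFAULT_THRESHOLD. The per-pin
 * interrupt counts are cleared under irq_lock and any pending re-enable
 * work is flushed so the new setting takes effect immediately.
 */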
1846 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1847 					const char __user *ubuf, size_t len,
1848 					loff_t *offp)
1849 {
1850 	struct seq_file *m = file->private_data;
1851 	struct drm_i915_private *dev_priv = m->private;
1852 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1853 	unsigned int new_threshold;
1854 	int i;
1855 	char *newline;
1856 	char tmp[16];
1857 
1858 	if (len >= sizeof(tmp))
1859 		return -EINVAL;
1860 
1861 	if (copy_from_user(tmp, ubuf, len))
1862 		return -EFAULT;
1863 
1864 	tmp[len] = '\0';
1865 
1866 	/* Strip newline, if any */
1867 	newline = strchr(tmp, '\n');
1868 	if (newline)
1869 		*newline = '\0';
1870 
1871 	if (strcmp(tmp, "reset") == 0)
1872 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1873 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1874 		return -EINVAL;
1875 
1876 	if (new_threshold > 0)
1877 		drm_dbg_kms(&dev_priv->drm,
1878 			    "Setting HPD storm detection threshold to %d\n",
1879 			    new_threshold);
1880 	else
1881 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1882 
1883 	spin_lock_irq(&dev_priv->irq_lock);
1884 	hotplug->hpd_storm_threshold = new_threshold;
1885 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1886 	for_each_hpd_pin(i)
1887 		hotplug->stats[i].count = 0;
1888 	spin_unlock_irq(&dev_priv->irq_lock);
1889 
1890 	/* Re-enable hpd immediately if we were in an irq storm */
1891 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1892 
1893 	return len;
1894 }
1895 
1896 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1897 {
1898 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1899 }
1900 
1901 static const struct file_operations i915_hpd_storm_ctl_fops = {
1902 	.owner = THIS_MODULE,
1903 	.open = i915_hpd_storm_ctl_open,
1904 	.read = seq_read,
1905 	.llseek = seq_lseek,
1906 	.release = single_release,
1907 	.write = i915_hpd_storm_ctl_write
1908 };
1909 
1910 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1911 {
1912 	struct drm_i915_private *dev_priv = m->private;
1913 
1914 	seq_printf(m, "Enabled: %s\n",
1915 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1916 
1917 	return 0;
1918 }
1919 
1920 static int
1921 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1922 {
1923 	return single_open(file, i915_hpd_short_storm_ctl_show,
1924 			   inode->i_private);
1925 }
1926 
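/*
 * Accepts a boolean to force HPD short-pulse storm detection on or off,
 * or "reset" to restore the platform default (enabled only when the
 * platform lacks DP MST support).
 */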
1927 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1928 					      const char __user *ubuf,
1929 					      size_t len, loff_t *offp)
1930 {
1931 	struct seq_file *m = file->private_data;
1932 	struct drm_i915_private *dev_priv = m->private;
1933 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1934 	char *newline;
1935 	char tmp[16];
1936 	int i;
1937 	bool new_state;
1938 
1939 	if (len >= sizeof(tmp))
1940 		return -EINVAL;
1941 
1942 	if (copy_from_user(tmp, ubuf, len))
1943 		return -EFAULT;
1944 
1945 	tmp[len] = '\0';
1946 
1947 	/* Strip newline, if any */
1948 	newline = strchr(tmp, '\n');
1949 	if (newline)
1950 		*newline = '\0';
1951 
1952 	/* Reset to the "default" state for this system */
1953 	if (strcmp(tmp, "reset") == 0)
1954 		new_state = !HAS_DP_MST(dev_priv);
1955 	else if (kstrtobool(tmp, &new_state) != 0)
1956 		return -EINVAL;
1957 
1958 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1959 		    new_state ? "En" : "Dis");
1960 
1961 	spin_lock_irq(&dev_priv->irq_lock);
1962 	hotplug->hpd_short_storm_enabled = new_state;
1963 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1964 	for_each_hpd_pin(i)
1965 		hotplug->stats[i].count = 0;
1966 	spin_unlock_irq(&dev_priv->irq_lock);
1967 
1968 	/* Re-enable hpd immediately if we were in an irq storm */
1969 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1970 
1971 	return len;
1972 }
1973 
1974 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1975 	.owner = THIS_MODULE,
1976 	.open = i915_hpd_short_storm_ctl_open,
1977 	.read = seq_read,
1978 	.llseek = seq_lseek,
1979 	.release = single_release,
1980 	.write = i915_hpd_short_storm_ctl_write,
1981 };
1982 
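/*
 * Manually enable (non-zero) or disable (zero) eDP DRRS on every active
 * CRTC whose current state supports DRRS, waiting for any pending commit
 * to finish before touching the hardware.
 */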
1983 static int i915_drrs_ctl_set(void *data, u64 val)
1984 {
1985 	struct drm_i915_private *dev_priv = data;
1986 	struct drm_device *dev = &dev_priv->drm;
1987 	struct intel_crtc *crtc;
1988 
1989 	if (DISPLAY_VER(dev_priv) < 7)
1990 		return -ENODEV;
1991 
1992 	for_each_intel_crtc(dev, crtc) {
1993 		struct drm_connector_list_iter conn_iter;
1994 		struct intel_crtc_state *crtc_state;
1995 		struct drm_connector *connector;
1996 		struct drm_crtc_commit *commit;
1997 		int ret;
1998 
1999 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
2000 		if (ret)
2001 			return ret;
2002 
2003 		crtc_state = to_intel_crtc_state(crtc->base.state);
2004 
2005 		if (!crtc_state->hw.active ||
2006 		    !crtc_state->has_drrs)
2007 			goto out;
2008 
2009 		commit = crtc_state->uapi.commit;
2010 		if (commit) {
2011 			ret = wait_for_completion_interruptible(&commit->hw_done);
2012 			if (ret)
2013 				goto out;
2014 		}
2015 
2016 		drm_connector_list_iter_begin(dev, &conn_iter);
2017 		drm_for_each_connector_iter(connector, &conn_iter) {
2018 			struct intel_encoder *encoder;
2019 			struct intel_dp *intel_dp;
2020 
2021 			if (!(crtc_state->uapi.connector_mask &
2022 			      drm_connector_mask(connector)))
2023 				continue;
2024 
2025 			encoder = intel_attached_encoder(to_intel_connector(connector));
2026 			if (encoder->type != INTEL_OUTPUT_EDP)
2027 				continue;
2028 
2029 			drm_dbg(&dev_priv->drm,
2030 				"Manually %sabling DRRS (val %llu)\n",
2031 				val ? "en" : "dis", val);
2032 
2033 			intel_dp = enc_to_intel_dp(encoder);
2034 			if (val)
2035 				intel_edp_drrs_enable(intel_dp,
2036 						      crtc_state);
2037 			else
2038 				intel_edp_drrs_disable(intel_dp,
2039 						       crtc_state);
2040 		}
2041 		drm_connector_list_iter_end(&conn_iter);
2042 
2043 out:
2044 		drm_modeset_unlock(&crtc->base.mutex);
2045 		if (ret)
2046 			return ret;
2047 	}
2048 
2049 	return 0;
2050 }
2051 
2052 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
2053 
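/*
 * Writing a truthy value re-arms FIFO underrun reporting on all active
 * pipes (after waiting for any pending commit to land) and resets the FBC
 * underrun tracking, so that subsequent underruns are reported again.
 */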
2054 static ssize_t
2055 i915_fifo_underrun_reset_write(struct file *filp,
2056 			       const char __user *ubuf,
2057 			       size_t cnt, loff_t *ppos)
2058 {
2059 	struct drm_i915_private *dev_priv = filp->private_data;
2060 	struct intel_crtc *intel_crtc;
2061 	struct drm_device *dev = &dev_priv->drm;
2062 	int ret;
2063 	bool reset;
2064 
2065 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
2066 	if (ret)
2067 		return ret;
2068 
2069 	if (!reset)
2070 		return cnt;
2071 
2072 	for_each_intel_crtc(dev, intel_crtc) {
2073 		struct drm_crtc_commit *commit;
2074 		struct intel_crtc_state *crtc_state;
2075 
2076 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
2077 		if (ret)
2078 			return ret;
2079 
2080 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
2081 		commit = crtc_state->uapi.commit;
2082 		if (commit) {
2083 			ret = wait_for_completion_interruptible(&commit->hw_done);
2084 			if (!ret)
2085 				ret = wait_for_completion_interruptible(&commit->flip_done);
2086 		}
2087 
2088 		if (!ret && crtc_state->hw.active) {
2089 			drm_dbg_kms(&dev_priv->drm,
2090 				    "Re-arming FIFO underruns on pipe %c\n",
2091 				    pipe_name(intel_crtc->pipe));
2092 
2093 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
2094 		}
2095 
2096 		drm_modeset_unlock(&intel_crtc->base.mutex);
2097 
2098 		if (ret)
2099 			return ret;
2100 	}
2101 
2102 	ret = intel_fbc_reset_underrun(dev_priv);
2103 	if (ret)
2104 		return ret;
2105 
2106 	return cnt;
2107 }
2108 
2109 static const struct file_operations i915_fifo_underrun_reset_ops = {
2110 	.owner = THIS_MODULE,
2111 	.open = simple_open,
2112 	.write = i915_fifo_underrun_reset_write,
2113 	.llseek = default_llseek,
2114 };
2115 
2116 static const struct drm_info_list intel_display_debugfs_list[] = {
2117 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
2118 	{"i915_fbc_status", i915_fbc_status, 0},
2119 	{"i915_ips_status", i915_ips_status, 0},
2120 	{"i915_sr_status", i915_sr_status, 0},
2121 	{"i915_opregion", i915_opregion, 0},
2122 	{"i915_vbt", i915_vbt, 0},
2123 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
2124 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
2125 	{"i915_power_domain_info", i915_power_domain_info, 0},
2126 	{"i915_dmc_info", i915_dmc_info, 0},
2127 	{"i915_display_info", i915_display_info, 0},
2128 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
2129 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
2130 	{"i915_ddb_info", i915_ddb_info, 0},
2131 	{"i915_drrs_status", i915_drrs_status, 0},
2132 	{"i915_lpsp_status", i915_lpsp_status, 0},
2133 };
2134 
2135 static const struct {
2136 	const char *name;
2137 	const struct file_operations *fops;
2138 } intel_display_debugfs_files[] = {
2139 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
2140 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
2141 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
2142 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
2143 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
2144 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
2145 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
2146 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
2147 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
2148 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
2149 	{"i915_ipc_status", &i915_ipc_status_fops},
2150 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
2151 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
2152 };
2153 
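/**
 * intel_display_debugfs_register - add display-specific debugfs files
 * @i915: i915 device instance
 *
 * Registers the writable display control files and the read-only info
 * nodes under the DRM primary minor's debugfs root.
 */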
2154 void intel_display_debugfs_register(struct drm_i915_private *i915)
2155 {
2156 	struct drm_minor *minor = i915->drm.primary;
2157 	int i;
2158 
2159 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
2160 		debugfs_create_file(intel_display_debugfs_files[i].name,
2161 				    0644,
2162 				    minor->debugfs_root,
2163 				    to_i915(minor->dev),
2164 				    intel_display_debugfs_files[i].fops);
2165 	}
2166 
2167 	drm_debugfs_create_files(intel_display_debugfs_list,
2168 				 ARRAY_SIZE(intel_display_debugfs_list),
2169 				 minor->debugfs_root, minor);
2170 }
2171 
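/* Dumps the eDP panel power sequencing and backlight delays. */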
2172 static int i915_panel_show(struct seq_file *m, void *data)
2173 {
2174 	struct drm_connector *connector = m->private;
2175 	struct intel_dp *intel_dp =
2176 		intel_attached_dp(to_intel_connector(connector));
2177 
2178 	if (connector->status != connector_status_connected)
2179 		return -ENODEV;
2180 
2181 	seq_printf(m, "Panel power up delay: %d\n",
2182 		   intel_dp->pps.panel_power_up_delay);
2183 	seq_printf(m, "Panel power down delay: %d\n",
2184 		   intel_dp->pps.panel_power_down_delay);
2185 	seq_printf(m, "Backlight on delay: %d\n",
2186 		   intel_dp->pps.backlight_on_delay);
2187 	seq_printf(m, "Backlight off delay: %d\n",
2188 		   intel_dp->pps.backlight_off_delay);
2189 
2190 	return 0;
2191 }
2192 DEFINE_SHOW_ATTRIBUTE(i915_panel);
2193 
2194 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2195 {
2196 	struct drm_connector *connector = m->private;
2197 	struct drm_i915_private *i915 = to_i915(connector->dev);
2198 	struct intel_connector *intel_connector = to_intel_connector(connector);
2199 	int ret;
2200 
2201 	ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
2202 	if (ret)
2203 		return ret;
2204 
2205 	if (!connector->encoder || connector->status != connector_status_connected) {
2206 		ret = -ENODEV;
2207 		goto out;
2208 	}
2209 
2210 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2211 		   connector->base.id);
2212 	intel_hdcp_info(m, intel_connector);
2213 
2214 out:
2215 	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
2216 
2217 	return ret;
2218 }
2219 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2220 
2221 static int i915_psr_status_show(struct seq_file *m, void *data)
2222 {
2223 	struct drm_connector *connector = m->private;
2224 	struct intel_dp *intel_dp =
2225 		intel_attached_dp(to_intel_connector(connector));
2226 
2227 	return intel_psr_status(m, intel_dp);
2228 }
2229 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
2230 
2231 #define LPSP_CAPABLE(COND) ((COND) ? seq_puts(m, "LPSP: capable\n") : \
2232 				seq_puts(m, "LPSP: incapable\n"))
2233 
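/*
 * Reports whether this connector can keep the display in the low power
 * single pipe (LPSP) state; capability depends on the platform and on the
 * port/connector type the encoder is attached to.
 */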
2234 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2235 {
2236 	struct drm_connector *connector = m->private;
2237 	struct drm_i915_private *i915 = to_i915(connector->dev);
2238 	struct intel_encoder *encoder;
2239 
2240 	encoder = intel_attached_encoder(to_intel_connector(connector));
2241 	if (!encoder)
2242 		return -ENODEV;
2243 
2244 	if (connector->status != connector_status_connected)
2245 		return -ENODEV;
2246 
2247 	switch (DISPLAY_VER(i915)) {
2248 	case 12:
2249 		/*
2250 		 * TGL can drive LPSP on ports up to DDI_C, but no TGL SKU
2251 		 * has DDI_C physically connected, and the driver does not
2252 		 * initialize the DDI_C port for gen12.
2253 		 */
2254 		LPSP_CAPABLE(encoder->port <= PORT_B);
2255 		break;
2256 	case 11:
2257 		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2258 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2259 		break;
2260 	case 10:
2261 	case 9:
2262 		LPSP_CAPABLE(encoder->port == PORT_A &&
2263 			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2264 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
2265 			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2266 		break;
2267 	default:
2268 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2269 			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2270 	}
2271 
2272 	return 0;
2273 }
2274 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2275 
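/*
 * Reports the current DSC enable state, the sink's DSC support and any
 * forced-DSC override for this connector; FEC sink support is reported
 * for non-eDP DP connectors. Lock contention is handled by retrying via
 * the modeset backoff dance.
 */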
2276 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2277 {
2278 	struct drm_connector *connector = m->private;
2279 	struct drm_device *dev = connector->dev;
2280 	struct drm_crtc *crtc;
2281 	struct intel_dp *intel_dp;
2282 	struct drm_modeset_acquire_ctx ctx;
2283 	struct intel_crtc_state *crtc_state = NULL;
2284 	int ret = 0;
2285 	bool try_again = false;
2286 
2287 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2288 
2289 	do {
2290 		try_again = false;
2291 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2292 				       &ctx);
2293 		if (ret) {
2294 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2295 				try_again = true;
2296 				continue;
2297 			}
2298 			break;
2299 		}
2300 		crtc = connector->state->crtc;
2301 		if (connector->status != connector_status_connected || !crtc) {
2302 			ret = -ENODEV;
2303 			break;
2304 		}
2305 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2306 		if (ret == -EDEADLK) {
2307 			ret = drm_modeset_backoff(&ctx);
2308 			if (!ret) {
2309 				try_again = true;
2310 				continue;
2311 			}
2312 			break;
2313 		} else if (ret) {
2314 			break;
2315 		}
2316 		intel_dp = intel_attached_dp(to_intel_connector(connector));
2317 		crtc_state = to_intel_crtc_state(crtc->state);
2318 		seq_printf(m, "DSC_Enabled: %s\n",
2319 			   yesno(crtc_state->dsc.compression_enable));
2320 		seq_printf(m, "DSC_Sink_Support: %s\n",
2321 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2322 		seq_printf(m, "Force_DSC_Enable: %s\n",
2323 			   yesno(intel_dp->force_dsc_en));
2324 		if (!intel_dp_is_edp(intel_dp))
2325 			seq_printf(m, "FEC_Sink_Support: %s\n",
2326 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2327 	} while (try_again);
2328 
2329 	drm_modeset_drop_locks(&ctx);
2330 	drm_modeset_acquire_fini(&ctx);
2331 
2332 	return ret;
2333 }
2334 
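/*
 * Writing a boolean updates intel_dp->force_dsc_en, forcing DSC on or off
 * for this connector on subsequent modesets.
 */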
2335 static ssize_t i915_dsc_fec_support_write(struct file *file,
2336 					  const char __user *ubuf,
2337 					  size_t len, loff_t *offp)
2338 {
2339 	bool dsc_enable = false;
2340 	int ret;
2341 	struct drm_connector *connector =
2342 		((struct seq_file *)file->private_data)->private;
2343 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2344 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2345 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2346 
2347 	if (len == 0)
2348 		return 0;
2349 
2350 	drm_dbg(&i915->drm,
2351 		"Copied %zu bytes from user to force DSC\n", len);
2352 
2353 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2354 	if (ret < 0)
2355 		return ret;
2356 
2357 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2358 		(dsc_enable) ? "true" : "false");
2359 	intel_dp->force_dsc_en = dsc_enable;
2360 
2361 	*offp += len;
2362 	return len;
2363 }
2364 
2365 static int i915_dsc_fec_support_open(struct inode *inode,
2366 				     struct file *file)
2367 {
2368 	return single_open(file, i915_dsc_fec_support_show,
2369 			   inode->i_private);
2370 }
2371 
2372 static const struct file_operations i915_dsc_fec_support_fops = {
2373 	.owner = THIS_MODULE,
2374 	.open = i915_dsc_fec_support_open,
2375 	.read = seq_read,
2376 	.llseek = seq_lseek,
2377 	.release = single_release,
2378 	.write = i915_dsc_fec_support_write
2379 };
2380 
2381 /**
2382  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2383  * @connector: pointer to a registered drm_connector
2384  *
2385  * Cleanup will be done by drm_connector_unregister() through a call to
2386  * drm_debugfs_connector_remove().
2387  *
2388  * Returns 0 on success, negative error codes on error.
2389  */
2390 int intel_connector_debugfs_add(struct drm_connector *connector)
2391 {
2392 	struct dentry *root = connector->debugfs_entry;
2393 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2394 
2395 	/* The connector must have been registered beforehand. */
2396 	if (!root)
2397 		return -ENODEV;
2398 
2399 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2400 		debugfs_create_file("i915_panel_timings", 0444, root,
2401 				    connector, &i915_panel_fops);
2402 		debugfs_create_file("i915_psr_sink_status", 0444, root,
2403 				    connector, &i915_psr_sink_status_fops);
2404 	}
2405 
2406 	if (HAS_PSR(dev_priv) &&
2407 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2408 		debugfs_create_file("i915_psr_status", 0444, root,
2409 				    connector, &i915_psr_status_fops);
2410 	}
2411 
2412 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2413 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2414 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2415 		debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
2416 				    connector, &i915_hdcp_sink_capability_fops);
2417 	}
2418 
2419 	if ((DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) &&
	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
	      !to_intel_connector(connector)->mst_port) ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
2420 		debugfs_create_file("i915_dsc_fec_support", 0444, root,
2421 				    connector, &i915_dsc_fec_support_fops);
2422 
2423 	/* Legacy panels don't support LPSP on any platform */
2424 	if ((DISPLAY_VER(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2425 	     IS_BROADWELL(dev_priv)) &&
2426 	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2427 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2428 	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2429 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2430 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2431 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2432 				    connector, &i915_lpsp_capability_fops);
2433 
2434 	return 0;
2435 }
2436 
2437 /**
2438  * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
2439  * @crtc: pointer to a drm_crtc
2440  *
2441  * Returns 0 on success, negative error codes on error.
2442  *
2443  * Failure to add debugfs entries should generally be ignored.
2444  */
2445 int intel_crtc_debugfs_add(struct drm_crtc *crtc)
2446 {
2447 	if (!crtc->debugfs_entry)
2448 		return -ENODEV;
2449 
2450 	crtc_updates_add(crtc);
2451 	return 0;
2452 }
2453