1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28 
29 #include <linux/sched/mm.h>
30 #include <linux/sort.h>
31 
32 #include <drm/drm_debugfs.h>
33 #include <drm/drm_fourcc.h>
34 
35 #include "display/intel_display_types.h"
36 #include "display/intel_dp.h"
37 #include "display/intel_fbc.h"
38 #include "display/intel_hdcp.h"
39 #include "display/intel_hdmi.h"
40 #include "display/intel_psr.h"
41 
42 #include "gem/i915_gem_context.h"
43 #include "gt/intel_gt_pm.h"
44 #include "gt/intel_gt_requests.h"
45 #include "gt/intel_reset.h"
46 #include "gt/intel_rc6.h"
47 #include "gt/intel_rps.h"
48 #include "gt/uc/intel_guc_submission.h"
49 
50 #include "i915_debugfs.h"
51 #include "i915_irq.h"
52 #include "i915_trace.h"
53 #include "intel_csr.h"
54 #include "intel_pm.h"
55 #include "intel_sideband.h"
56 
57 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
58 {
59 	return to_i915(node->minor->dev);
60 }
61 
62 static int i915_capabilities(struct seq_file *m, void *data)
63 {
64 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
65 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
66 	struct drm_printer p = drm_seq_file_printer(m);
67 	const char *msg;
68 
69 	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
70 	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
71 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
72 
73 	msg = "n/a";
74 #ifdef CONFIG_INTEL_IOMMU
75 	msg = enableddisabled(intel_iommu_gfx_mapped);
76 #endif
77 	seq_printf(m, "iommu: %s\n", msg);
78 
79 	intel_device_info_dump_flags(info, &p);
80 	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
81 	intel_driver_caps_print(&dev_priv->caps, &p);
82 
83 	kernel_param_lock(THIS_MODULE);
84 	i915_params_dump(&i915_modparams, &p);
85 	kernel_param_unlock(THIS_MODULE);
86 
87 	return 0;
88 }
89 
90 static char get_tiling_flag(struct drm_i915_gem_object *obj)
91 {
92 	switch (i915_gem_object_get_tiling(obj)) {
93 	default:
94 	case I915_TILING_NONE: return ' ';
95 	case I915_TILING_X: return 'X';
96 	case I915_TILING_Y: return 'Y';
97 	}
98 }
99 
100 static char get_global_flag(struct drm_i915_gem_object *obj)
101 {
102 	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
103 }
104 
105 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
106 {
107 	return obj->mm.mapping ? 'M' : ' ';
108 }
109 
110 static const char *
111 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
112 {
113 	size_t x = 0;
114 
115 	switch (page_sizes) {
116 	case 0:
117 		return "";
118 	case I915_GTT_PAGE_SIZE_4K:
119 		return "4K";
120 	case I915_GTT_PAGE_SIZE_64K:
121 		return "64K";
122 	case I915_GTT_PAGE_SIZE_2M:
123 		return "2M";
124 	default:
125 		if (!buf)
126 			return "M";
127 
128 		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
129 			x += snprintf(buf + x, len - x, "2M, ");
130 		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
131 			x += snprintf(buf + x, len - x, "64K, ");
132 		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
133 			x += snprintf(buf + x, len - x, "4K, ");
134 		buf[x-2] = '\0';
135 
136 		return buf;
137 	}
138 }
139 
140 static void
141 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
142 {
143 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
144 	struct intel_engine_cs *engine;
145 	struct i915_vma *vma;
146 	int pin_count = 0;
147 
148 	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
149 		   &obj->base,
150 		   get_tiling_flag(obj),
151 		   get_global_flag(obj),
152 		   get_pin_mapped_flag(obj),
153 		   obj->base.size / 1024,
154 		   obj->read_domains,
155 		   obj->write_domain,
156 		   i915_cache_level_str(dev_priv, obj->cache_level),
157 		   obj->mm.dirty ? " dirty" : "",
158 		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
159 	if (obj->base.name)
160 		seq_printf(m, " (name: %d)", obj->base.name);
161 
162 	spin_lock(&obj->vma.lock);
163 	list_for_each_entry(vma, &obj->vma.list, obj_link) {
164 		if (!drm_mm_node_allocated(&vma->node))
165 			continue;
166 
167 		spin_unlock(&obj->vma.lock);
168 
169 		if (i915_vma_is_pinned(vma))
170 			pin_count++;
171 
172 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
173 			   i915_vma_is_ggtt(vma) ? "g" : "pp",
174 			   vma->node.start, vma->node.size,
175 			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
176 		if (i915_vma_is_ggtt(vma)) {
177 			switch (vma->ggtt_view.type) {
178 			case I915_GGTT_VIEW_NORMAL:
179 				seq_puts(m, ", normal");
180 				break;
181 
182 			case I915_GGTT_VIEW_PARTIAL:
183 				seq_printf(m, ", partial [%08llx+%x]",
184 					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
185 					   vma->ggtt_view.partial.size << PAGE_SHIFT);
186 				break;
187 
188 			case I915_GGTT_VIEW_ROTATED:
189 				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
190 					   vma->ggtt_view.rotated.plane[0].width,
191 					   vma->ggtt_view.rotated.plane[0].height,
192 					   vma->ggtt_view.rotated.plane[0].stride,
193 					   vma->ggtt_view.rotated.plane[0].offset,
194 					   vma->ggtt_view.rotated.plane[1].width,
195 					   vma->ggtt_view.rotated.plane[1].height,
196 					   vma->ggtt_view.rotated.plane[1].stride,
197 					   vma->ggtt_view.rotated.plane[1].offset);
198 				break;
199 
200 			case I915_GGTT_VIEW_REMAPPED:
201 				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
202 					   vma->ggtt_view.remapped.plane[0].width,
203 					   vma->ggtt_view.remapped.plane[0].height,
204 					   vma->ggtt_view.remapped.plane[0].stride,
205 					   vma->ggtt_view.remapped.plane[0].offset,
206 					   vma->ggtt_view.remapped.plane[1].width,
207 					   vma->ggtt_view.remapped.plane[1].height,
208 					   vma->ggtt_view.remapped.plane[1].stride,
209 					   vma->ggtt_view.remapped.plane[1].offset);
210 				break;
211 
212 			default:
213 				MISSING_CASE(vma->ggtt_view.type);
214 				break;
215 			}
216 		}
217 		if (vma->fence)
218 			seq_printf(m, " , fence: %d", vma->fence->id);
219 		seq_puts(m, ")");
220 
221 		spin_lock(&obj->vma.lock);
222 	}
223 	spin_unlock(&obj->vma.lock);
224 
225 	seq_printf(m, " (pinned x %d)", pin_count);
226 	if (obj->stolen)
227 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
228 	if (i915_gem_object_is_framebuffer(obj))
229 		seq_printf(m, " (fb)");
230 
231 	engine = i915_gem_object_last_write_engine(obj);
232 	if (engine)
233 		seq_printf(m, " (%s)", engine->name);
234 }
235 
236 struct file_stats {
237 	struct i915_address_space *vm;
238 	unsigned long count;
239 	u64 total, unbound;
240 	u64 active, inactive;
241 	u64 closed;
242 };
243 
244 static int per_file_stats(int id, void *ptr, void *data)
245 {
246 	struct drm_i915_gem_object *obj = ptr;
247 	struct file_stats *stats = data;
248 	struct i915_vma *vma;
249 
250 	if (!kref_get_unless_zero(&obj->base.refcount))
251 		return 0;
252 
253 	stats->count++;
254 	stats->total += obj->base.size;
255 	if (!atomic_read(&obj->bind_count))
256 		stats->unbound += obj->base.size;
257 
258 	spin_lock(&obj->vma.lock);
259 	if (!stats->vm) {
260 		for_each_ggtt_vma(vma, obj) {
261 			if (!drm_mm_node_allocated(&vma->node))
262 				continue;
263 
264 			if (i915_vma_is_active(vma))
265 				stats->active += vma->node.size;
266 			else
267 				stats->inactive += vma->node.size;
268 
269 			if (i915_vma_is_closed(vma))
270 				stats->closed += vma->node.size;
271 		}
272 	} else {
273 		struct rb_node *p = obj->vma.tree.rb_node;
274 
275 		while (p) {
276 			long cmp;
277 
278 			vma = rb_entry(p, typeof(*vma), obj_node);
279 			cmp = i915_vma_compare(vma, stats->vm, NULL);
280 			if (cmp == 0) {
281 				if (drm_mm_node_allocated(&vma->node)) {
282 					if (i915_vma_is_active(vma))
283 						stats->active += vma->node.size;
284 					else
285 						stats->inactive += vma->node.size;
286 
287 					if (i915_vma_is_closed(vma))
288 						stats->closed += vma->node.size;
289 				}
290 				break;
291 			}
292 			if (cmp < 0)
293 				p = p->rb_right;
294 			else
295 				p = p->rb_left;
296 		}
297 	}
298 	spin_unlock(&obj->vma.lock);
299 
300 	i915_gem_object_put(obj);
301 	return 0;
302 }
303 
304 #define print_file_stats(m, name, stats) do { \
305 	if (stats.count) \
306 		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
307 			   name, \
308 			   stats.count, \
309 			   stats.total, \
310 			   stats.active, \
311 			   stats.inactive, \
312 			   stats.unbound, \
313 			   stats.closed); \
314 } while (0)
315 
316 static void print_context_stats(struct seq_file *m,
317 				struct drm_i915_private *i915)
318 {
319 	struct file_stats kstats = {};
320 	struct i915_gem_context *ctx, *cn;
321 
322 	spin_lock(&i915->gem.contexts.lock);
323 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
324 		struct i915_gem_engines_iter it;
325 		struct intel_context *ce;
326 
327 		if (!kref_get_unless_zero(&ctx->ref))
328 			continue;
329 
330 		spin_unlock(&i915->gem.contexts.lock);
331 
332 		for_each_gem_engine(ce,
333 				    i915_gem_context_lock_engines(ctx), it) {
334 			intel_context_lock_pinned(ce);
335 			if (intel_context_is_pinned(ce)) {
336 				rcu_read_lock();
337 				if (ce->state)
338 					per_file_stats(0,
339 						       ce->state->obj, &kstats);
340 				per_file_stats(0, ce->ring->vma->obj, &kstats);
341 				rcu_read_unlock();
342 			}
343 			intel_context_unlock_pinned(ce);
344 		}
345 		i915_gem_context_unlock_engines(ctx);
346 
347 		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
348 			struct file_stats stats = {
349 				.vm = rcu_access_pointer(ctx->vm),
350 			};
351 			struct drm_file *file = ctx->file_priv->file;
352 			struct task_struct *task;
353 			char name[80];
354 
355 			rcu_read_lock();
356 			idr_for_each(&file->object_idr, per_file_stats, &stats);
357 			rcu_read_unlock();
358 
359 			rcu_read_lock();
360 			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
361 			snprintf(name, sizeof(name), "%s",
362 				 task ? task->comm : "<unknown>");
363 			rcu_read_unlock();
364 
365 			print_file_stats(m, name, stats);
366 		}
367 
368 		spin_lock(&i915->gem.contexts.lock);
369 		list_safe_reset_next(ctx, cn, link);
370 		i915_gem_context_put(ctx);
371 	}
372 	spin_unlock(&i915->gem.contexts.lock);
373 
374 	print_file_stats(m, "[k]contexts", kstats);
375 }
376 
377 static int i915_gem_object_info(struct seq_file *m, void *data)
378 {
379 	struct drm_i915_private *i915 = node_to_i915(m->private);
380 
381 	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
382 		   i915->mm.shrink_count,
383 		   atomic_read(&i915->mm.free_count),
384 		   i915->mm.shrink_memory);
385 
386 	seq_putc(m, '\n');
387 
388 	print_context_stats(m, i915);
389 
390 	return 0;
391 }
392 
393 static void gen8_display_interrupt_info(struct seq_file *m)
394 {
395 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
396 	enum pipe pipe;
397 
398 	for_each_pipe(dev_priv, pipe) {
399 		enum intel_display_power_domain power_domain;
400 		intel_wakeref_t wakeref;
401 
402 		power_domain = POWER_DOMAIN_PIPE(pipe);
403 		wakeref = intel_display_power_get_if_enabled(dev_priv,
404 							     power_domain);
405 		if (!wakeref) {
406 			seq_printf(m, "Pipe %c power disabled\n",
407 				   pipe_name(pipe));
408 			continue;
409 		}
410 		seq_printf(m, "Pipe %c IMR:\t%08x\n",
411 			   pipe_name(pipe),
412 			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
413 		seq_printf(m, "Pipe %c IIR:\t%08x\n",
414 			   pipe_name(pipe),
415 			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
416 		seq_printf(m, "Pipe %c IER:\t%08x\n",
417 			   pipe_name(pipe),
418 			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
419 
420 		intel_display_power_put(dev_priv, power_domain, wakeref);
421 	}
422 
423 	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
424 		   I915_READ(GEN8_DE_PORT_IMR));
425 	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
426 		   I915_READ(GEN8_DE_PORT_IIR));
427 	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
428 		   I915_READ(GEN8_DE_PORT_IER));
429 
430 	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
431 		   I915_READ(GEN8_DE_MISC_IMR));
432 	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
433 		   I915_READ(GEN8_DE_MISC_IIR));
434 	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
435 		   I915_READ(GEN8_DE_MISC_IER));
436 
437 	seq_printf(m, "PCU interrupt mask:\t%08x\n",
438 		   I915_READ(GEN8_PCU_IMR));
439 	seq_printf(m, "PCU interrupt identity:\t%08x\n",
440 		   I915_READ(GEN8_PCU_IIR));
441 	seq_printf(m, "PCU interrupt enable:\t%08x\n",
442 		   I915_READ(GEN8_PCU_IER));
443 }
444 
445 static int i915_interrupt_info(struct seq_file *m, void *data)
446 {
447 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
448 	struct intel_engine_cs *engine;
449 	intel_wakeref_t wakeref;
450 	int i, pipe;
451 
452 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
453 
454 	if (IS_CHERRYVIEW(dev_priv)) {
455 		intel_wakeref_t pref;
456 
457 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
458 			   I915_READ(GEN8_MASTER_IRQ));
459 
460 		seq_printf(m, "Display IER:\t%08x\n",
461 			   I915_READ(VLV_IER));
462 		seq_printf(m, "Display IIR:\t%08x\n",
463 			   I915_READ(VLV_IIR));
464 		seq_printf(m, "Display IIR_RW:\t%08x\n",
465 			   I915_READ(VLV_IIR_RW));
466 		seq_printf(m, "Display IMR:\t%08x\n",
467 			   I915_READ(VLV_IMR));
468 		for_each_pipe(dev_priv, pipe) {
469 			enum intel_display_power_domain power_domain;
470 
471 			power_domain = POWER_DOMAIN_PIPE(pipe);
472 			pref = intel_display_power_get_if_enabled(dev_priv,
473 								  power_domain);
474 			if (!pref) {
475 				seq_printf(m, "Pipe %c power disabled\n",
476 					   pipe_name(pipe));
477 				continue;
478 			}
479 
480 			seq_printf(m, "Pipe %c stat:\t%08x\n",
481 				   pipe_name(pipe),
482 				   I915_READ(PIPESTAT(pipe)));
483 
484 			intel_display_power_put(dev_priv, power_domain, pref);
485 		}
486 
487 		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
488 		seq_printf(m, "Port hotplug:\t%08x\n",
489 			   I915_READ(PORT_HOTPLUG_EN));
490 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
491 			   I915_READ(VLV_DPFLIPSTAT));
492 		seq_printf(m, "DPINVGTT:\t%08x\n",
493 			   I915_READ(DPINVGTT));
494 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
495 
496 		for (i = 0; i < 4; i++) {
497 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
498 				   i, I915_READ(GEN8_GT_IMR(i)));
499 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
500 				   i, I915_READ(GEN8_GT_IIR(i)));
501 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
502 				   i, I915_READ(GEN8_GT_IER(i)));
503 		}
504 
505 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
506 			   I915_READ(GEN8_PCU_IMR));
507 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
508 			   I915_READ(GEN8_PCU_IIR));
509 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
510 			   I915_READ(GEN8_PCU_IER));
511 	} else if (INTEL_GEN(dev_priv) >= 11) {
512 		seq_printf(m, "Master Interrupt Control:  %08x\n",
513 			   I915_READ(GEN11_GFX_MSTR_IRQ));
514 
515 		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
516 			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
517 		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
518 			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
519 		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
520 			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
521 		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
522 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
523 		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
524 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
525 		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
526 			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
527 
528 		seq_printf(m, "Display Interrupt Control:\t%08x\n",
529 			   I915_READ(GEN11_DISPLAY_INT_CTL));
530 
531 		gen8_display_interrupt_info(m);
532 	} else if (INTEL_GEN(dev_priv) >= 8) {
533 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
534 			   I915_READ(GEN8_MASTER_IRQ));
535 
536 		for (i = 0; i < 4; i++) {
537 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
538 				   i, I915_READ(GEN8_GT_IMR(i)));
539 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
540 				   i, I915_READ(GEN8_GT_IIR(i)));
541 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
542 				   i, I915_READ(GEN8_GT_IER(i)));
543 		}
544 
545 		gen8_display_interrupt_info(m);
546 	} else if (IS_VALLEYVIEW(dev_priv)) {
547 		intel_wakeref_t pref;
548 
549 		seq_printf(m, "Display IER:\t%08x\n",
550 			   I915_READ(VLV_IER));
551 		seq_printf(m, "Display IIR:\t%08x\n",
552 			   I915_READ(VLV_IIR));
553 		seq_printf(m, "Display IIR_RW:\t%08x\n",
554 			   I915_READ(VLV_IIR_RW));
555 		seq_printf(m, "Display IMR:\t%08x\n",
556 			   I915_READ(VLV_IMR));
557 		for_each_pipe(dev_priv, pipe) {
558 			enum intel_display_power_domain power_domain;
559 
560 			power_domain = POWER_DOMAIN_PIPE(pipe);
561 			pref = intel_display_power_get_if_enabled(dev_priv,
562 								  power_domain);
563 			if (!pref) {
564 				seq_printf(m, "Pipe %c power disabled\n",
565 					   pipe_name(pipe));
566 				continue;
567 			}
568 
569 			seq_printf(m, "Pipe %c stat:\t%08x\n",
570 				   pipe_name(pipe),
571 				   I915_READ(PIPESTAT(pipe)));
572 			intel_display_power_put(dev_priv, power_domain, pref);
573 		}
574 
575 		seq_printf(m, "Master IER:\t%08x\n",
576 			   I915_READ(VLV_MASTER_IER));
577 
578 		seq_printf(m, "Render IER:\t%08x\n",
579 			   I915_READ(GTIER));
580 		seq_printf(m, "Render IIR:\t%08x\n",
581 			   I915_READ(GTIIR));
582 		seq_printf(m, "Render IMR:\t%08x\n",
583 			   I915_READ(GTIMR));
584 
585 		seq_printf(m, "PM IER:\t\t%08x\n",
586 			   I915_READ(GEN6_PMIER));
587 		seq_printf(m, "PM IIR:\t\t%08x\n",
588 			   I915_READ(GEN6_PMIIR));
589 		seq_printf(m, "PM IMR:\t\t%08x\n",
590 			   I915_READ(GEN6_PMIMR));
591 
592 		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
593 		seq_printf(m, "Port hotplug:\t%08x\n",
594 			   I915_READ(PORT_HOTPLUG_EN));
595 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
596 			   I915_READ(VLV_DPFLIPSTAT));
597 		seq_printf(m, "DPINVGTT:\t%08x\n",
598 			   I915_READ(DPINVGTT));
599 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
600 
601 	} else if (!HAS_PCH_SPLIT(dev_priv)) {
602 		seq_printf(m, "Interrupt enable:    %08x\n",
603 			   I915_READ(GEN2_IER));
604 		seq_printf(m, "Interrupt identity:  %08x\n",
605 			   I915_READ(GEN2_IIR));
606 		seq_printf(m, "Interrupt mask:      %08x\n",
607 			   I915_READ(GEN2_IMR));
608 		for_each_pipe(dev_priv, pipe)
609 			seq_printf(m, "Pipe %c stat:         %08x\n",
610 				   pipe_name(pipe),
611 				   I915_READ(PIPESTAT(pipe)));
612 	} else {
613 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
614 			   I915_READ(DEIER));
615 		seq_printf(m, "North Display Interrupt identity:	%08x\n",
616 			   I915_READ(DEIIR));
617 		seq_printf(m, "North Display Interrupt mask:		%08x\n",
618 			   I915_READ(DEIMR));
619 		seq_printf(m, "South Display Interrupt enable:		%08x\n",
620 			   I915_READ(SDEIER));
621 		seq_printf(m, "South Display Interrupt identity:	%08x\n",
622 			   I915_READ(SDEIIR));
623 		seq_printf(m, "South Display Interrupt mask:		%08x\n",
624 			   I915_READ(SDEIMR));
625 		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
626 			   I915_READ(GTIER));
627 		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
628 			   I915_READ(GTIIR));
629 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
630 			   I915_READ(GTIMR));
631 	}
632 
633 	if (INTEL_GEN(dev_priv) >= 11) {
634 		seq_printf(m, "RCS Intr Mask:\t %08x\n",
635 			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
636 		seq_printf(m, "BCS Intr Mask:\t %08x\n",
637 			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
638 		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
639 			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
640 		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
641 			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
642 		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
643 			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
644 		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
645 			   I915_READ(GEN11_GUC_SG_INTR_MASK));
646 		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
647 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
648 		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
649 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
650 		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
651 			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
652 
653 	} else if (INTEL_GEN(dev_priv) >= 6) {
654 		for_each_uabi_engine(engine, dev_priv) {
655 			seq_printf(m,
656 				   "Graphics Interrupt mask (%s):	%08x\n",
657 				   engine->name, ENGINE_READ(engine, RING_IMR));
658 		}
659 	}
660 
661 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
662 
663 	return 0;
664 }
665 
666 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
667 {
668 	struct drm_i915_private *i915 = node_to_i915(m->private);
669 	unsigned int i;
670 
671 	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
672 
673 	rcu_read_lock();
674 	for (i = 0; i < i915->ggtt.num_fences; i++) {
675 		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
676 		struct i915_vma *vma = reg->vma;
677 
678 		seq_printf(m, "Fence %d, pin count = %d, object = ",
679 			   i, atomic_read(&reg->pin_count));
680 		if (!vma)
681 			seq_puts(m, "unused");
682 		else
683 			describe_obj(m, vma->obj);
684 		seq_putc(m, '\n');
685 	}
686 	rcu_read_unlock();
687 
688 	return 0;
689 }
690 
691 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
692 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
693 			      size_t count, loff_t *pos)
694 {
695 	struct i915_gpu_state *error;
696 	ssize_t ret;
697 	void *buf;
698 
699 	error = file->private_data;
700 	if (!error)
701 		return 0;
702 
703 	/* Bounce buffer required because of kernfs __user API convenience. */
704 	buf = kmalloc(count, GFP_KERNEL);
705 	if (!buf)
706 		return -ENOMEM;
707 
708 	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
709 	if (ret <= 0)
710 		goto out;
711 
712 	if (!copy_to_user(ubuf, buf, ret))
713 		*pos += ret;
714 	else
715 		ret = -EFAULT;
716 
717 out:
718 	kfree(buf);
719 	return ret;
720 }
721 
722 static int gpu_state_release(struct inode *inode, struct file *file)
723 {
724 	i915_gpu_state_put(file->private_data);
725 	return 0;
726 }
727 
728 static int i915_gpu_info_open(struct inode *inode, struct file *file)
729 {
730 	struct drm_i915_private *i915 = inode->i_private;
731 	struct i915_gpu_state *gpu;
732 	intel_wakeref_t wakeref;
733 
734 	gpu = NULL;
735 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
736 		gpu = i915_capture_gpu_state(i915);
737 	if (IS_ERR(gpu))
738 		return PTR_ERR(gpu);
739 
740 	file->private_data = gpu;
741 	return 0;
742 }
743 
744 static const struct file_operations i915_gpu_info_fops = {
745 	.owner = THIS_MODULE,
746 	.open = i915_gpu_info_open,
747 	.read = gpu_state_read,
748 	.llseek = default_llseek,
749 	.release = gpu_state_release,
750 };
751 
752 static ssize_t
753 i915_error_state_write(struct file *filp,
754 		       const char __user *ubuf,
755 		       size_t cnt,
756 		       loff_t *ppos)
757 {
758 	struct i915_gpu_state *error = filp->private_data;
759 
760 	if (!error)
761 		return 0;
762 
763 	DRM_DEBUG_DRIVER("Resetting error state\n");
764 	i915_reset_error_state(error->i915);
765 
766 	return cnt;
767 }
768 
769 static int i915_error_state_open(struct inode *inode, struct file *file)
770 {
771 	struct i915_gpu_state *error;
772 
773 	error = i915_first_error_state(inode->i_private);
774 	if (IS_ERR(error))
775 		return PTR_ERR(error);
776 
777 	file->private_data  = error;
778 	return 0;
779 }
780 
781 static const struct file_operations i915_error_state_fops = {
782 	.owner = THIS_MODULE,
783 	.open = i915_error_state_open,
784 	.read = gpu_state_read,
785 	.write = i915_error_state_write,
786 	.llseek = default_llseek,
787 	.release = gpu_state_release,
788 };
789 #endif
790 
791 static int i915_frequency_info(struct seq_file *m, void *unused)
792 {
793 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
794 	struct intel_uncore *uncore = &dev_priv->uncore;
795 	struct intel_rps *rps = &dev_priv->gt.rps;
796 	intel_wakeref_t wakeref;
797 	int ret = 0;
798 
799 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
800 
801 	if (IS_GEN(dev_priv, 5)) {
802 		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
803 		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
804 
805 		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
806 		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
807 		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
808 			   MEMSTAT_VID_SHIFT);
809 		seq_printf(m, "Current P-state: %d\n",
810 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
811 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
812 		u32 rpmodectl, freq_sts;
813 
814 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
815 		seq_printf(m, "Video Turbo Mode: %s\n",
816 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
817 		seq_printf(m, "HW control enabled: %s\n",
818 			   yesno(rpmodectl & GEN6_RP_ENABLE));
819 		seq_printf(m, "SW control enabled: %s\n",
820 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
821 				  GEN6_RP_MEDIA_SW_MODE));
822 
823 		vlv_punit_get(dev_priv);
824 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
825 		vlv_punit_put(dev_priv);
826 
827 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
828 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
829 
830 		seq_printf(m, "actual GPU freq: %d MHz\n",
831 			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
832 
833 		seq_printf(m, "current GPU freq: %d MHz\n",
834 			   intel_gpu_freq(rps, rps->cur_freq));
835 
836 		seq_printf(m, "max GPU freq: %d MHz\n",
837 			   intel_gpu_freq(rps, rps->max_freq));
838 
839 		seq_printf(m, "min GPU freq: %d MHz\n",
840 			   intel_gpu_freq(rps, rps->min_freq));
841 
842 		seq_printf(m, "idle GPU freq: %d MHz\n",
843 			   intel_gpu_freq(rps, rps->idle_freq));
844 
845 		seq_printf(m,
846 			   "efficient (RPe) frequency: %d MHz\n",
847 			   intel_gpu_freq(rps, rps->efficient_freq));
848 	} else if (INTEL_GEN(dev_priv) >= 6) {
849 		u32 rp_state_limits;
850 		u32 gt_perf_status;
851 		u32 rp_state_cap;
852 		u32 rpmodectl, rpinclimit, rpdeclimit;
853 		u32 rpstat, cagf, reqf;
854 		u32 rpupei, rpcurup, rpprevup;
855 		u32 rpdownei, rpcurdown, rpprevdown;
856 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
857 		int max_freq;
858 
859 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
860 		if (IS_GEN9_LP(dev_priv)) {
861 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
862 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
863 		} else {
864 			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
865 			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
866 		}
867 
868 		/* RPSTAT1 is in the GT power well */
869 		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
870 
871 		reqf = I915_READ(GEN6_RPNSWREQ);
872 		if (INTEL_GEN(dev_priv) >= 9)
873 			reqf >>= 23;
874 		else {
875 			reqf &= ~GEN6_TURBO_DISABLE;
876 			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
877 				reqf >>= 24;
878 			else
879 				reqf >>= 25;
880 		}
881 		reqf = intel_gpu_freq(rps, reqf);
882 
883 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
884 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
885 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
886 
887 		rpstat = I915_READ(GEN6_RPSTAT1);
888 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
889 		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
890 		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
891 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
892 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
893 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
894 		cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));
895 
896 		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
897 
898 		if (INTEL_GEN(dev_priv) >= 11) {
899 			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
900 			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
901 			/*
902 			 * The equivalent to the PM ISR & IIR cannot be read
903 			 * without affecting the current state of the system
904 			 */
905 			pm_isr = 0;
906 			pm_iir = 0;
907 		} else if (INTEL_GEN(dev_priv) >= 8) {
908 			pm_ier = I915_READ(GEN8_GT_IER(2));
909 			pm_imr = I915_READ(GEN8_GT_IMR(2));
910 			pm_isr = I915_READ(GEN8_GT_ISR(2));
911 			pm_iir = I915_READ(GEN8_GT_IIR(2));
912 		} else {
913 			pm_ier = I915_READ(GEN6_PMIER);
914 			pm_imr = I915_READ(GEN6_PMIMR);
915 			pm_isr = I915_READ(GEN6_PMISR);
916 			pm_iir = I915_READ(GEN6_PMIIR);
917 		}
918 		pm_mask = I915_READ(GEN6_PMINTRMSK);
919 
920 		seq_printf(m, "Video Turbo Mode: %s\n",
921 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
922 		seq_printf(m, "HW control enabled: %s\n",
923 			   yesno(rpmodectl & GEN6_RP_ENABLE));
924 		seq_printf(m, "SW control enabled: %s\n",
925 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
926 				  GEN6_RP_MEDIA_SW_MODE));
927 
928 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
929 			   pm_ier, pm_imr, pm_mask);
930 		if (INTEL_GEN(dev_priv) <= 10)
931 			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
932 				   pm_isr, pm_iir);
933 		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
934 			   rps->pm_intrmsk_mbz);
935 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
936 		seq_printf(m, "Render p-state ratio: %d\n",
937 			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
938 		seq_printf(m, "Render p-state VID: %d\n",
939 			   gt_perf_status & 0xff);
940 		seq_printf(m, "Render p-state limit: %d\n",
941 			   rp_state_limits & 0xff);
942 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
943 		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
944 		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
945 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
946 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
947 		seq_printf(m, "CAGF: %dMHz\n", cagf);
948 		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
949 			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
950 		seq_printf(m, "RP CUR UP: %d (%dus)\n",
951 			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
952 		seq_printf(m, "RP PREV UP: %d (%dus)\n",
953 			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
954 		seq_printf(m, "Up threshold: %d%%\n",
955 			   rps->power.up_threshold);
956 
957 		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
958 			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
959 		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
960 			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
961 		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
962 			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
963 		seq_printf(m, "Down threshold: %d%%\n",
964 			   rps->power.down_threshold);
965 
966 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
967 			    rp_state_cap >> 16) & 0xff;
968 		max_freq *= (IS_GEN9_BC(dev_priv) ||
969 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
970 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
971 			   intel_gpu_freq(rps, max_freq));
972 
973 		max_freq = (rp_state_cap & 0xff00) >> 8;
974 		max_freq *= (IS_GEN9_BC(dev_priv) ||
975 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
976 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
977 			   intel_gpu_freq(rps, max_freq));
978 
979 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
980 			    rp_state_cap >> 0) & 0xff;
981 		max_freq *= (IS_GEN9_BC(dev_priv) ||
982 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
983 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
984 			   intel_gpu_freq(rps, max_freq));
985 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
986 			   intel_gpu_freq(rps, rps->max_freq));
987 
988 		seq_printf(m, "Current freq: %d MHz\n",
989 			   intel_gpu_freq(rps, rps->cur_freq));
990 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
991 		seq_printf(m, "Idle freq: %d MHz\n",
992 			   intel_gpu_freq(rps, rps->idle_freq));
993 		seq_printf(m, "Min freq: %d MHz\n",
994 			   intel_gpu_freq(rps, rps->min_freq));
995 		seq_printf(m, "Boost freq: %d MHz\n",
996 			   intel_gpu_freq(rps, rps->boost_freq));
997 		seq_printf(m, "Max freq: %d MHz\n",
998 			   intel_gpu_freq(rps, rps->max_freq));
999 		seq_printf(m,
1000 			   "efficient (RPe) frequency: %d MHz\n",
1001 			   intel_gpu_freq(rps, rps->efficient_freq));
1002 	} else {
1003 		seq_puts(m, "no P-state info available\n");
1004 	}
1005 
1006 	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1007 	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1008 	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1009 
1010 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1011 	return ret;
1012 }
1013 
1014 static int ironlake_drpc_info(struct seq_file *m)
1015 {
1016 	struct drm_i915_private *i915 = node_to_i915(m->private);
1017 	struct intel_uncore *uncore = &i915->uncore;
1018 	u32 rgvmodectl, rstdbyctl;
1019 	u16 crstandvid;
1020 
1021 	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
1022 	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
1023 	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
1024 
1025 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1026 	seq_printf(m, "Boost freq: %d\n",
1027 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1028 		   MEMMODE_BOOST_FREQ_SHIFT);
1029 	seq_printf(m, "HW control enabled: %s\n",
1030 		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1031 	seq_printf(m, "SW control enabled: %s\n",
1032 		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1033 	seq_printf(m, "Gated voltage change: %s\n",
1034 		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1035 	seq_printf(m, "Starting frequency: P%d\n",
1036 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1037 	seq_printf(m, "Max P-state: P%d\n",
1038 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1039 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1040 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1041 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1042 	seq_printf(m, "Render standby enabled: %s\n",
1043 		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1044 	seq_puts(m, "Current RS state: ");
1045 	switch (rstdbyctl & RSX_STATUS_MASK) {
1046 	case RSX_STATUS_ON:
1047 		seq_puts(m, "on\n");
1048 		break;
1049 	case RSX_STATUS_RC1:
1050 		seq_puts(m, "RC1\n");
1051 		break;
1052 	case RSX_STATUS_RC1E:
1053 		seq_puts(m, "RC1E\n");
1054 		break;
1055 	case RSX_STATUS_RS1:
1056 		seq_puts(m, "RS1\n");
1057 		break;
1058 	case RSX_STATUS_RS2:
1059 		seq_puts(m, "RS2 (RC6)\n");
1060 		break;
1061 	case RSX_STATUS_RS3:
1062 		seq_puts(m, "RC3 (RC6+)\n");
1063 		break;
1064 	default:
1065 		seq_puts(m, "unknown\n");
1066 		break;
1067 	}
1068 
1069 	return 0;
1070 }
1071 
1072 static int i915_forcewake_domains(struct seq_file *m, void *data)
1073 {
1074 	struct drm_i915_private *i915 = node_to_i915(m->private);
1075 	struct intel_uncore *uncore = &i915->uncore;
1076 	struct intel_uncore_forcewake_domain *fw_domain;
1077 	unsigned int tmp;
1078 
1079 	seq_printf(m, "user.bypass_count = %u\n",
1080 		   uncore->user_forcewake_count);
1081 
1082 	for_each_fw_domain(fw_domain, uncore, tmp)
1083 		seq_printf(m, "%s.wake_count = %u\n",
1084 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
1085 			   READ_ONCE(fw_domain->wake_count));
1086 
1087 	return 0;
1088 }
1089 
1090 static void print_rc6_res(struct seq_file *m,
1091 			  const char *title,
1092 			  const i915_reg_t reg)
1093 {
1094 	struct drm_i915_private *i915 = node_to_i915(m->private);
1095 	intel_wakeref_t wakeref;
1096 
1097 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
1098 		seq_printf(m, "%s %u (%llu us)\n", title,
1099 			   intel_uncore_read(&i915->uncore, reg),
1100 			   intel_rc6_residency_us(&i915->gt.rc6, reg));
1101 }
1102 
1103 static int vlv_drpc_info(struct seq_file *m)
1104 {
1105 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1106 	u32 rcctl1, pw_status;
1107 
1108 	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1109 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1110 
1111 	seq_printf(m, "RC6 Enabled: %s\n",
1112 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1113 					GEN6_RC_CTL_EI_MODE(1))));
1114 	seq_printf(m, "Render Power Well: %s\n",
1115 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1116 	seq_printf(m, "Media Power Well: %s\n",
1117 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1118 
1119 	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1120 	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1121 
1122 	return i915_forcewake_domains(m, NULL);
1123 }
1124 
1125 static int gen6_drpc_info(struct seq_file *m)
1126 {
1127 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1128 	u32 gt_core_status, rcctl1, rc6vids = 0;
1129 	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1130 
1131 	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1132 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1133 
1134 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1135 	if (INTEL_GEN(dev_priv) >= 9) {
1136 		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1137 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1138 	}
1139 
1140 	if (INTEL_GEN(dev_priv) <= 7)
1141 		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1142 				       &rc6vids, NULL);
1143 
1144 	seq_printf(m, "RC1e Enabled: %s\n",
1145 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1146 	seq_printf(m, "RC6 Enabled: %s\n",
1147 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1148 	if (INTEL_GEN(dev_priv) >= 9) {
1149 		seq_printf(m, "Render Well Gating Enabled: %s\n",
1150 			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1151 		seq_printf(m, "Media Well Gating Enabled: %s\n",
1152 			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1153 	}
1154 	seq_printf(m, "Deep RC6 Enabled: %s\n",
1155 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1156 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1157 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1158 	seq_puts(m, "Current RC state: ");
1159 	switch (gt_core_status & GEN6_RCn_MASK) {
1160 	case GEN6_RC0:
1161 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1162 			seq_puts(m, "Core Power Down\n");
1163 		else
1164 			seq_puts(m, "on\n");
1165 		break;
1166 	case GEN6_RC3:
1167 		seq_puts(m, "RC3\n");
1168 		break;
1169 	case GEN6_RC6:
1170 		seq_puts(m, "RC6\n");
1171 		break;
1172 	case GEN6_RC7:
1173 		seq_puts(m, "RC7\n");
1174 		break;
1175 	default:
1176 		seq_puts(m, "Unknown\n");
1177 		break;
1178 	}
1179 
1180 	seq_printf(m, "Core Power Down: %s\n",
1181 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1182 	if (INTEL_GEN(dev_priv) >= 9) {
1183 		seq_printf(m, "Render Power Well: %s\n",
1184 			(gen9_powergate_status &
1185 			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1186 		seq_printf(m, "Media Power Well: %s\n",
1187 			(gen9_powergate_status &
1188 			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1189 	}
1190 
1191 	/* Not exactly sure what this is */
1192 	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1193 		      GEN6_GT_GFX_RC6_LOCKED);
1194 	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1195 	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1196 	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1197 
1198 	if (INTEL_GEN(dev_priv) <= 7) {
1199 		seq_printf(m, "RC6   voltage: %dmV\n",
1200 			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1201 		seq_printf(m, "RC6+  voltage: %dmV\n",
1202 			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1203 		seq_printf(m, "RC6++ voltage: %dmV\n",
1204 			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1205 	}
1206 
1207 	return i915_forcewake_domains(m, NULL);
1208 }
1209 
1210 static int i915_drpc_info(struct seq_file *m, void *unused)
1211 {
1212 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1213 	intel_wakeref_t wakeref;
1214 	int err = -ENODEV;
1215 
1216 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1217 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1218 			err = vlv_drpc_info(m);
1219 		else if (INTEL_GEN(dev_priv) >= 6)
1220 			err = gen6_drpc_info(m);
1221 		else
1222 			err = ironlake_drpc_info(m);
1223 	}
1224 
1225 	return err;
1226 }
1227 
1228 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1229 {
1230 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1231 
1232 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1233 		   dev_priv->fb_tracking.busy_bits);
1234 
1235 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1236 		   dev_priv->fb_tracking.flip_bits);
1237 
1238 	return 0;
1239 }
1240 
1241 static int i915_fbc_status(struct seq_file *m, void *unused)
1242 {
1243 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1244 	struct intel_fbc *fbc = &dev_priv->fbc;
1245 	intel_wakeref_t wakeref;
1246 
1247 	if (!HAS_FBC(dev_priv))
1248 		return -ENODEV;
1249 
1250 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1251 	mutex_lock(&fbc->lock);
1252 
1253 	if (intel_fbc_is_active(dev_priv))
1254 		seq_puts(m, "FBC enabled\n");
1255 	else
1256 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1257 
1258 	if (intel_fbc_is_active(dev_priv)) {
1259 		u32 mask;
1260 
1261 		if (INTEL_GEN(dev_priv) >= 8)
1262 			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1263 		else if (INTEL_GEN(dev_priv) >= 7)
1264 			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1265 		else if (INTEL_GEN(dev_priv) >= 5)
1266 			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1267 		else if (IS_G4X(dev_priv))
1268 			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1269 		else
1270 			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1271 							FBC_STAT_COMPRESSED);
1272 
1273 		seq_printf(m, "Compressing: %s\n", yesno(mask));
1274 	}
1275 
1276 	mutex_unlock(&fbc->lock);
1277 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1278 
1279 	return 0;
1280 }
1281 
1282 static int i915_fbc_false_color_get(void *data, u64 *val)
1283 {
1284 	struct drm_i915_private *dev_priv = data;
1285 
1286 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1287 		return -ENODEV;
1288 
1289 	*val = dev_priv->fbc.false_color;
1290 
1291 	return 0;
1292 }
1293 
1294 static int i915_fbc_false_color_set(void *data, u64 val)
1295 {
1296 	struct drm_i915_private *dev_priv = data;
1297 	u32 reg;
1298 
1299 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1300 		return -ENODEV;
1301 
1302 	mutex_lock(&dev_priv->fbc.lock);
1303 
1304 	reg = I915_READ(ILK_DPFC_CONTROL);
1305 	dev_priv->fbc.false_color = val;
1306 
1307 	I915_WRITE(ILK_DPFC_CONTROL, val ?
1308 		   (reg | FBC_CTL_FALSE_COLOR) :
1309 		   (reg & ~FBC_CTL_FALSE_COLOR));
1310 
1311 	mutex_unlock(&dev_priv->fbc.lock);
1312 	return 0;
1313 }
1314 
1315 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1316 			i915_fbc_false_color_get, i915_fbc_false_color_set,
1317 			"%llu\n");
1318 
1319 static int i915_ips_status(struct seq_file *m, void *unused)
1320 {
1321 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1322 	intel_wakeref_t wakeref;
1323 
1324 	if (!HAS_IPS(dev_priv))
1325 		return -ENODEV;
1326 
1327 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1328 
1329 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1330 		   yesno(i915_modparams.enable_ips));
1331 
1332 	if (INTEL_GEN(dev_priv) >= 8) {
1333 		seq_puts(m, "Currently: unknown\n");
1334 	} else {
1335 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1336 			seq_puts(m, "Currently: enabled\n");
1337 		else
1338 			seq_puts(m, "Currently: disabled\n");
1339 	}
1340 
1341 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1342 
1343 	return 0;
1344 }
1345 
1346 static int i915_sr_status(struct seq_file *m, void *unused)
1347 {
1348 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1349 	intel_wakeref_t wakeref;
1350 	bool sr_enabled = false;
1351 
1352 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1353 
1354 	if (INTEL_GEN(dev_priv) >= 9)
1355 		/* no global SR status; inspect per-plane WM */;
1356 	else if (HAS_PCH_SPLIT(dev_priv))
1357 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1358 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1359 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1360 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1361 	else if (IS_I915GM(dev_priv))
1362 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1363 	else if (IS_PINEVIEW(dev_priv))
1364 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1365 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1366 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1367 
1368 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
1369 
1370 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1371 
1372 	return 0;
1373 }
1374 
1375 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1376 {
1377 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1378 	struct intel_rps *rps = &dev_priv->gt.rps;
1379 	unsigned int max_gpu_freq, min_gpu_freq;
1380 	intel_wakeref_t wakeref;
1381 	int gpu_freq, ia_freq;
1382 
1383 	if (!HAS_LLC(dev_priv))
1384 		return -ENODEV;
1385 
1386 	min_gpu_freq = rps->min_freq;
1387 	max_gpu_freq = rps->max_freq;
1388 	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1389 		/* Convert GT frequency to 50 HZ units */
1390 		min_gpu_freq /= GEN9_FREQ_SCALER;
1391 		max_gpu_freq /= GEN9_FREQ_SCALER;
1392 	}
1393 
1394 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1395 
1396 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1397 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1398 		ia_freq = gpu_freq;
1399 		sandybridge_pcode_read(dev_priv,
1400 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1401 				       &ia_freq, NULL);
1402 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1403 			   intel_gpu_freq(rps,
1404 					  (gpu_freq *
1405 					   (IS_GEN9_BC(dev_priv) ||
1406 					    INTEL_GEN(dev_priv) >= 10 ?
1407 					    GEN9_FREQ_SCALER : 1))),
1408 			   ((ia_freq >> 0) & 0xff) * 100,
1409 			   ((ia_freq >> 8) & 0xff) * 100);
1410 	}
1411 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1412 
1413 	return 0;
1414 }
1415 
1416 static int i915_opregion(struct seq_file *m, void *unused)
1417 {
1418 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1419 
1420 	if (opregion->header)
1421 		seq_write(m, opregion->header, OPREGION_SIZE);
1422 
1423 	return 0;
1424 }
1425 
1426 static int i915_vbt(struct seq_file *m, void *unused)
1427 {
1428 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1429 
1430 	if (opregion->vbt)
1431 		seq_write(m, opregion->vbt, opregion->vbt_size);
1432 
1433 	return 0;
1434 }
1435 
1436 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1437 {
1438 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1439 	struct drm_device *dev = &dev_priv->drm;
1440 	struct intel_framebuffer *fbdev_fb = NULL;
1441 	struct drm_framebuffer *drm_fb;
1442 
1443 #ifdef CONFIG_DRM_FBDEV_EMULATION
1444 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1445 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1446 
1447 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1448 			   fbdev_fb->base.width,
1449 			   fbdev_fb->base.height,
1450 			   fbdev_fb->base.format->depth,
1451 			   fbdev_fb->base.format->cpp[0] * 8,
1452 			   fbdev_fb->base.modifier,
1453 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1454 		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1455 		seq_putc(m, '\n');
1456 	}
1457 #endif
1458 
1459 	mutex_lock(&dev->mode_config.fb_lock);
1460 	drm_for_each_fb(drm_fb, dev) {
1461 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1462 		if (fb == fbdev_fb)
1463 			continue;
1464 
1465 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1466 			   fb->base.width,
1467 			   fb->base.height,
1468 			   fb->base.format->depth,
1469 			   fb->base.format->cpp[0] * 8,
1470 			   fb->base.modifier,
1471 			   drm_framebuffer_read_refcount(&fb->base));
1472 		describe_obj(m, intel_fb_obj(&fb->base));
1473 		seq_putc(m, '\n');
1474 	}
1475 	mutex_unlock(&dev->mode_config.fb_lock);
1476 
1477 	return 0;
1478 }
1479 
1480 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1481 {
1482 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1483 		   ring->space, ring->head, ring->tail, ring->emit);
1484 }
1485 
1486 static int i915_context_status(struct seq_file *m, void *unused)
1487 {
1488 	struct drm_i915_private *i915 = node_to_i915(m->private);
1489 	struct i915_gem_context *ctx, *cn;
1490 
1491 	spin_lock(&i915->gem.contexts.lock);
1492 	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
1493 		struct i915_gem_engines_iter it;
1494 		struct intel_context *ce;
1495 
1496 		if (!kref_get_unless_zero(&ctx->ref))
1497 			continue;
1498 
1499 		spin_unlock(&i915->gem.contexts.lock);
1500 
1501 		seq_puts(m, "HW context ");
1502 		if (ctx->pid) {
1503 			struct task_struct *task;
1504 
1505 			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1506 			if (task) {
1507 				seq_printf(m, "(%s [%d]) ",
1508 					   task->comm, task->pid);
1509 				put_task_struct(task);
1510 			}
1511 		} else if (IS_ERR(ctx->file_priv)) {
1512 			seq_puts(m, "(deleted) ");
1513 		} else {
1514 			seq_puts(m, "(kernel) ");
1515 		}
1516 
1517 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1518 		seq_putc(m, '\n');
1519 
1520 		for_each_gem_engine(ce,
1521 				    i915_gem_context_lock_engines(ctx), it) {
1522 			intel_context_lock_pinned(ce);
1523 			if (intel_context_is_pinned(ce)) {
1524 				seq_printf(m, "%s: ", ce->engine->name);
1525 				if (ce->state)
1526 					describe_obj(m, ce->state->obj);
1527 				describe_ctx_ring(m, ce->ring);
1528 				seq_putc(m, '\n');
1529 			}
1530 			intel_context_unlock_pinned(ce);
1531 		}
1532 		i915_gem_context_unlock_engines(ctx);
1533 
1534 		seq_putc(m, '\n');
1535 
1536 		spin_lock(&i915->gem.contexts.lock);
1537 		list_safe_reset_next(ctx, cn, link);
1538 		i915_gem_context_put(ctx);
1539 	}
1540 	spin_unlock(&i915->gem.contexts.lock);
1541 
1542 	return 0;
1543 }
1544 
1545 static const char *swizzle_string(unsigned swizzle)
1546 {
1547 	switch (swizzle) {
1548 	case I915_BIT_6_SWIZZLE_NONE:
1549 		return "none";
1550 	case I915_BIT_6_SWIZZLE_9:
1551 		return "bit9";
1552 	case I915_BIT_6_SWIZZLE_9_10:
1553 		return "bit9/bit10";
1554 	case I915_BIT_6_SWIZZLE_9_11:
1555 		return "bit9/bit11";
1556 	case I915_BIT_6_SWIZZLE_9_10_11:
1557 		return "bit9/bit10/bit11";
1558 	case I915_BIT_6_SWIZZLE_9_17:
1559 		return "bit9/bit17";
1560 	case I915_BIT_6_SWIZZLE_9_10_17:
1561 		return "bit9/bit10/bit17";
1562 	case I915_BIT_6_SWIZZLE_UNKNOWN:
1563 		return "unknown";
1564 	}
1565 
1566 	return "bug";
1567 }
1568 
1569 static int i915_swizzle_info(struct seq_file *m, void *data)
1570 {
1571 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1572 	struct intel_uncore *uncore = &dev_priv->uncore;
1573 	intel_wakeref_t wakeref;
1574 
1575 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1576 
1577 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1578 		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
1579 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1580 		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));
1581 
1582 	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1583 		seq_printf(m, "DDC = 0x%08x\n",
1584 			   intel_uncore_read(uncore, DCC));
1585 		seq_printf(m, "DDC2 = 0x%08x\n",
1586 			   intel_uncore_read(uncore, DCC2));
1587 		seq_printf(m, "C0DRB3 = 0x%04x\n",
1588 			   intel_uncore_read16(uncore, C0DRB3));
1589 		seq_printf(m, "C1DRB3 = 0x%04x\n",
1590 			   intel_uncore_read16(uncore, C1DRB3));
1591 	} else if (INTEL_GEN(dev_priv) >= 6) {
1592 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1593 			   intel_uncore_read(uncore, MAD_DIMM_C0));
1594 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1595 			   intel_uncore_read(uncore, MAD_DIMM_C1));
1596 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1597 			   intel_uncore_read(uncore, MAD_DIMM_C2));
1598 		seq_printf(m, "TILECTL = 0x%08x\n",
1599 			   intel_uncore_read(uncore, TILECTL));
1600 		if (INTEL_GEN(dev_priv) >= 8)
1601 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1602 				   intel_uncore_read(uncore, GAMTARBMODE));
1603 		else
1604 			seq_printf(m, "ARB_MODE = 0x%08x\n",
1605 				   intel_uncore_read(uncore, ARB_MODE));
1606 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1607 			   intel_uncore_read(uncore, DISP_ARB_CTL));
1608 	}
1609 
1610 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1611 		seq_puts(m, "L-shaped memory detected\n");
1612 
1613 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1614 
1615 	return 0;
1616 }
1617 
1618 static const char *rps_power_to_str(unsigned int power)
1619 {
1620 	static const char * const strings[] = {
1621 		[LOW_POWER] = "low power",
1622 		[BETWEEN] = "mixed",
1623 		[HIGH_POWER] = "high power",
1624 	};
1625 
1626 	if (power >= ARRAY_SIZE(strings) || !strings[power])
1627 		return "unknown";
1628 
1629 	return strings[power];
1630 }
1631 
1632 static int i915_rps_boost_info(struct seq_file *m, void *data)
1633 {
1634 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1635 	struct intel_rps *rps = &dev_priv->gt.rps;
1636 	u32 act_freq = rps->cur_freq;
1637 	intel_wakeref_t wakeref;
1638 
1639 	with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
1640 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1641 			vlv_punit_get(dev_priv);
1642 			act_freq = vlv_punit_read(dev_priv,
1643 						  PUNIT_REG_GPU_FREQ_STS);
1644 			vlv_punit_put(dev_priv);
1645 			act_freq = (act_freq >> 8) & 0xff;
1646 		} else {
1647 			act_freq = intel_get_cagf(rps,
1648 						  I915_READ(GEN6_RPSTAT1));
1649 		}
1650 	}
1651 
1652 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
1653 	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
1654 	seq_printf(m, "Boosts outstanding? %d\n",
1655 		   atomic_read(&rps->num_waiters));
1656 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
1657 	seq_printf(m, "Frequency requested %d, actual %d\n",
1658 		   intel_gpu_freq(rps, rps->cur_freq),
1659 		   intel_gpu_freq(rps, act_freq));
1660 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
1661 		   intel_gpu_freq(rps, rps->min_freq),
1662 		   intel_gpu_freq(rps, rps->min_freq_softlimit),
1663 		   intel_gpu_freq(rps, rps->max_freq_softlimit),
1664 		   intel_gpu_freq(rps, rps->max_freq));
1665 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
1666 		   intel_gpu_freq(rps, rps->idle_freq),
1667 		   intel_gpu_freq(rps, rps->efficient_freq),
1668 		   intel_gpu_freq(rps, rps->boost_freq));
1669 
1670 	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
1671 
1672 	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
1673 		u32 rpup, rpupei;
1674 		u32 rpdown, rpdownei;
1675 
1676 		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1677 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
1678 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
1679 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
1680 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
1681 		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1682 
1683 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
1684 			   rps_power_to_str(rps->power.mode));
1685 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
1686 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
1687 			   rps->power.up_threshold);
1688 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
1689 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
1690 			   rps->power.down_threshold);
1691 	} else {
1692 		seq_puts(m, "\nRPS Autotuning inactive\n");
1693 	}
1694 
1695 	return 0;
1696 }
1697 
1698 static int i915_llc(struct seq_file *m, void *data)
1699 {
1700 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1701 	const bool edram = INTEL_GEN(dev_priv) > 8;
1702 
1703 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
1704 	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1705 		   dev_priv->edram_size_mb);
1706 
1707 	return 0;
1708 }
1709 
1710 static int i915_huc_load_status_info(struct seq_file *m, void *data)
1711 {
1712 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1713 	intel_wakeref_t wakeref;
1714 	struct drm_printer p;
1715 
1716 	if (!HAS_GT_UC(dev_priv))
1717 		return -ENODEV;
1718 
1719 	p = drm_seq_file_printer(m);
1720 	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
1721 
1722 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
1723 		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
1724 
1725 	return 0;
1726 }
1727 
1728 static int i915_guc_load_status_info(struct seq_file *m, void *data)
1729 {
1730 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1731 	intel_wakeref_t wakeref;
1732 	struct drm_printer p;
1733 
1734 	if (!HAS_GT_UC(dev_priv))
1735 		return -ENODEV;
1736 
1737 	p = drm_seq_file_printer(m);
1738 	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
1739 
1740 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1741 		u32 tmp = I915_READ(GUC_STATUS);
1742 		u32 i;
1743 
1744 		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
1745 		seq_printf(m, "\tBootrom status = 0x%x\n",
1746 			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
1747 		seq_printf(m, "\tuKernel status = 0x%x\n",
1748 			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
1749 		seq_printf(m, "\tMIA Core status = 0x%x\n",
1750 			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
1751 		seq_puts(m, "\nScratch registers:\n");
1752 		for (i = 0; i < 16; i++) {
1753 			seq_printf(m, "\t%2d: \t0x%x\n",
1754 				   i, I915_READ(SOFT_SCRATCH(i)));
1755 		}
1756 	}
1757 
1758 	return 0;
1759 }
1760 
1761 static const char *
1762 stringify_guc_log_type(enum guc_log_buffer_type type)
1763 {
1764 	switch (type) {
1765 	case GUC_ISR_LOG_BUFFER:
1766 		return "ISR";
1767 	case GUC_DPC_LOG_BUFFER:
1768 		return "DPC";
1769 	case GUC_CRASH_DUMP_LOG_BUFFER:
1770 		return "CRASH";
1771 	default:
1772 		MISSING_CASE(type);
1773 	}
1774 
1775 	return "";
1776 }
1777 
1778 static void i915_guc_log_info(struct seq_file *m,
1779 			      struct drm_i915_private *dev_priv)
1780 {
1781 	struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
1782 	enum guc_log_buffer_type type;
1783 
1784 	if (!intel_guc_log_relay_created(log)) {
1785 		seq_puts(m, "GuC log relay not created\n");
1786 		return;
1787 	}
1788 
1789 	seq_puts(m, "GuC logging stats:\n");
1790 
1791 	seq_printf(m, "\tRelay full count: %u\n",
1792 		   log->relay.full_count);
1793 
1794 	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1795 		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1796 			   stringify_guc_log_type(type),
1797 			   log->stats[type].flush,
1798 			   log->stats[type].sampled_overflow);
1799 	}
1800 }
1801 
1802 static int i915_guc_info(struct seq_file *m, void *data)
1803 {
1804 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1805 	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
1806 	struct intel_guc_client *client = guc->execbuf_client;
1807 
1808 	if (!USES_GUC(dev_priv))
1809 		return -ENODEV;
1810 
1811 	i915_guc_log_info(m, dev_priv);
1812 
1813 	if (!USES_GUC_SUBMISSION(dev_priv))
1814 		return 0;
1815 
1816 	GEM_BUG_ON(!guc->execbuf_client);
1817 
1818 	seq_printf(m, "\nDoorbell map:\n");
1819 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
1820 	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
1821 
1822 	seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
1823 	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
1824 		   client->priority,
1825 		   client->stage_id,
1826 		   client->proc_desc_offset);
1827 	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
1828 		   client->doorbell_id, client->doorbell_offset);
1829 	/* Add more as required ... */
1830 
1831 	return 0;
1832 }
1833 
1834 static int i915_guc_stage_pool(struct seq_file *m, void *data)
1835 {
1836 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1837 	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
1838 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
1839 	int index;
1840 
1841 	if (!USES_GUC_SUBMISSION(dev_priv))
1842 		return -ENODEV;
1843 
1844 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
1845 		struct intel_engine_cs *engine;
1846 
1847 		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
1848 			continue;
1849 
1850 		seq_printf(m, "GuC stage descriptor %u:\n", index);
1851 		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
1852 		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
1853 		seq_printf(m, "\tPriority: %d\n", desc->priority);
1854 		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
1855 		seq_printf(m, "\tEngines used: 0x%x\n",
1856 			   desc->engines_used);
1857 		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
1858 			   desc->db_trigger_phy,
1859 			   desc->db_trigger_cpu,
1860 			   desc->db_trigger_uk);
1861 		seq_printf(m, "\tProcess descriptor: 0x%x\n",
1862 			   desc->process_desc);
1863 		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
1864 			   desc->wq_addr, desc->wq_size);
1865 		seq_putc(m, '\n');
1866 
1867 		for_each_uabi_engine(engine, dev_priv) {
1868 			u32 guc_engine_id = engine->guc_id;
1869 			struct guc_execlist_context *lrc =
1870 						&desc->lrc[guc_engine_id];
1871 
1872 			seq_printf(m, "\t%s LRC:\n", engine->name);
1873 			seq_printf(m, "\t\tContext desc: 0x%x\n",
1874 				   lrc->context_desc);
1875 			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
1876 			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
1877 			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
1878 			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
1879 			seq_putc(m, '\n');
1880 		}
1881 	}
1882 
1883 	return 0;
1884 }
1885 
1886 static int i915_guc_log_dump(struct seq_file *m, void *data)
1887 {
1888 	struct drm_info_node *node = m->private;
1889 	struct drm_i915_private *dev_priv = node_to_i915(node);
1890 	bool dump_load_err = !!node->info_ent->data;
1891 	struct drm_i915_gem_object *obj = NULL;
1892 	u32 *log;
1893 	int i = 0;
1894 
1895 	if (!HAS_GT_UC(dev_priv))
1896 		return -ENODEV;
1897 
1898 	if (dump_load_err)
1899 		obj = dev_priv->gt.uc.load_err_log;
1900 	else if (dev_priv->gt.uc.guc.log.vma)
1901 		obj = dev_priv->gt.uc.guc.log.vma->obj;
1902 
1903 	if (!obj)
1904 		return 0;
1905 
1906 	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
1907 	if (IS_ERR(log)) {
1908 		DRM_DEBUG("Failed to pin object\n");
1909 		seq_puts(m, "(log data unaccessible)\n");
1910 		return PTR_ERR(log);
1911 	}
1912 
1913 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
1914 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
1915 			   *(log + i), *(log + i + 1),
1916 			   *(log + i + 2), *(log + i + 3));
1917 
1918 	seq_putc(m, '\n');
1919 
1920 	i915_gem_object_unpin_map(obj);
1921 
1922 	return 0;
1923 }
1924 
1925 static int i915_guc_log_level_get(void *data, u64 *val)
1926 {
1927 	struct drm_i915_private *dev_priv = data;
1928 
1929 	if (!USES_GUC(dev_priv))
1930 		return -ENODEV;
1931 
1932 	*val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
1933 
1934 	return 0;
1935 }
1936 
1937 static int i915_guc_log_level_set(void *data, u64 val)
1938 {
1939 	struct drm_i915_private *dev_priv = data;
1940 
1941 	if (!USES_GUC(dev_priv))
1942 		return -ENODEV;
1943 
1944 	return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
1945 }
1946 
1947 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
1948 			i915_guc_log_level_get, i915_guc_log_level_set,
1949 			"%lld\n");
1950 
1951 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
1952 {
1953 	struct drm_i915_private *i915 = inode->i_private;
1954 	struct intel_guc *guc = &i915->gt.uc.guc;
1955 	struct intel_guc_log *log = &guc->log;
1956 
1957 	if (!intel_guc_is_running(guc))
1958 		return -ENODEV;
1959 
1960 	file->private_data = log;
1961 
1962 	return intel_guc_log_relay_open(log);
1963 }
1964 
1965 static ssize_t
1966 i915_guc_log_relay_write(struct file *filp,
1967 			 const char __user *ubuf,
1968 			 size_t cnt,
1969 			 loff_t *ppos)
1970 {
1971 	struct intel_guc_log *log = filp->private_data;
1972 	int val;
1973 	int ret;
1974 
1975 	ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
1976 	if (ret < 0)
1977 		return ret;
1978 
1979 	/*
1980 	 * Enable and start the guc log relay on value of 1.
1981 	 * Flush log relay for any other value.
1982 	 */
1983 	if (val == 1)
1984 		ret = intel_guc_log_relay_start(log);
1985 	else
1986 		intel_guc_log_relay_flush(log);
1987 
1988 	return ret ?: cnt;
1989 }
1990 
1991 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
1992 {
1993 	struct drm_i915_private *i915 = inode->i_private;
1994 	struct intel_guc *guc = &i915->gt.uc.guc;
1995 
1996 	intel_guc_log_relay_close(&guc->log);
1997 	return 0;
1998 }
1999 
2000 static const struct file_operations i915_guc_log_relay_fops = {
2001 	.owner = THIS_MODULE,
2002 	.open = i915_guc_log_relay_open,
2003 	.write = i915_guc_log_relay_write,
2004 	.release = i915_guc_log_relay_release,
2005 };
2006 
2007 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2008 {
2009 	u8 val;
2010 	static const char * const sink_status[] = {
2011 		"inactive",
2012 		"transition to active, capture and display",
2013 		"active, display from RFB",
2014 		"active, capture and display on sink device timings",
2015 		"transition to inactive, capture and display, timing re-sync",
2016 		"reserved",
2017 		"reserved",
2018 		"sink internal error",
2019 	};
2020 	struct drm_connector *connector = m->private;
2021 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2022 	struct intel_dp *intel_dp =
2023 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2024 	int ret;
2025 
2026 	if (!CAN_PSR(dev_priv)) {
2027 		seq_puts(m, "PSR Unsupported\n");
2028 		return -ENODEV;
2029 	}
2030 
2031 	if (connector->status != connector_status_connected)
2032 		return -ENODEV;
2033 
2034 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2035 
2036 	if (ret == 1) {
2037 		const char *str = "unknown";
2038 
2039 		val &= DP_PSR_SINK_STATE_MASK;
2040 		if (val < ARRAY_SIZE(sink_status))
2041 			str = sink_status[val];
2042 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2043 	} else {
2044 		return ret;
2045 	}
2046 
2047 	return 0;
2048 }
2049 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2050 
2051 static void
2052 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2053 {
2054 	u32 val, status_val;
2055 	const char *status = "unknown";
2056 
2057 	if (dev_priv->psr.psr2_enabled) {
2058 		static const char * const live_status[] = {
2059 			"IDLE",
2060 			"CAPTURE",
2061 			"CAPTURE_FS",
2062 			"SLEEP",
2063 			"BUFON_FW",
2064 			"ML_UP",
2065 			"SU_STANDBY",
2066 			"FAST_SLEEP",
2067 			"DEEP_SLEEP",
2068 			"BUF_ON",
2069 			"TG_ON"
2070 		};
2071 		val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
2072 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2073 			      EDP_PSR2_STATUS_STATE_SHIFT;
2074 		if (status_val < ARRAY_SIZE(live_status))
2075 			status = live_status[status_val];
2076 	} else {
2077 		static const char * const live_status[] = {
2078 			"IDLE",
2079 			"SRDONACK",
2080 			"SRDENT",
2081 			"BUFOFF",
2082 			"BUFON",
2083 			"AUXACK",
2084 			"SRDOFFACK",
2085 			"SRDENT_ON",
2086 		};
2087 		val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
2088 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2089 			      EDP_PSR_STATUS_STATE_SHIFT;
2090 		if (status_val < ARRAY_SIZE(live_status))
2091 			status = live_status[status_val];
2092 	}
2093 
2094 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2095 }
2096 
2097 static int i915_edp_psr_status(struct seq_file *m, void *data)
2098 {
2099 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2100 	struct i915_psr *psr = &dev_priv->psr;
2101 	intel_wakeref_t wakeref;
2102 	const char *status;
2103 	bool enabled;
2104 	u32 val;
2105 
2106 	if (!HAS_PSR(dev_priv))
2107 		return -ENODEV;
2108 
2109 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2110 	if (psr->dp)
2111 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2112 	seq_puts(m, "\n");
2113 
2114 	if (!psr->sink_support)
2115 		return 0;
2116 
2117 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2118 	mutex_lock(&psr->lock);
2119 
2120 	if (psr->enabled)
2121 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2122 	else
2123 		status = "disabled";
2124 	seq_printf(m, "PSR mode: %s\n", status);
2125 
2126 	if (!psr->enabled) {
2127 		seq_printf(m, "PSR sink not reliable: %s\n",
2128 			   yesno(psr->sink_not_reliable));
2129 
2130 		goto unlock;
2131 	}
2132 
2133 	if (psr->psr2_enabled) {
2134 		val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
2135 		enabled = val & EDP_PSR2_ENABLE;
2136 	} else {
2137 		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
2138 		enabled = val & EDP_PSR_ENABLE;
2139 	}
2140 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2141 		   enableddisabled(enabled), val);
2142 	psr_source_status(dev_priv, m);
2143 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2144 		   psr->busy_frontbuffer_bits);
2145 
2146 	/*
2147 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2148 	 */
2149 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2150 		val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
2151 		val &= EDP_PSR_PERF_CNT_MASK;
2152 		seq_printf(m, "Performance counter: %u\n", val);
2153 	}
2154 
2155 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
2156 		seq_printf(m, "Last attempted entry at: %lld\n",
2157 			   psr->last_entry_attempt);
2158 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2159 	}
2160 
2161 	if (psr->psr2_enabled) {
2162 		u32 su_frames_val[3];
2163 		int frame;
2164 
2165 		/*
2166 		 * Reading all 3 registers before hand to minimize crossing a
2167 		 * frame boundary between register reads
2168 		 */
2169 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
2170 			val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
2171 						       frame));
2172 			su_frames_val[frame / 3] = val;
2173 		}
2174 
2175 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2176 
2177 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2178 			u32 su_blocks;
2179 
2180 			su_blocks = su_frames_val[frame / 3] &
2181 				    PSR2_SU_STATUS_MASK(frame);
2182 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2183 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
2184 		}
2185 	}
2186 
2187 unlock:
2188 	mutex_unlock(&psr->lock);
2189 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2190 
2191 	return 0;
2192 }
2193 
2194 static int
2195 i915_edp_psr_debug_set(void *data, u64 val)
2196 {
2197 	struct drm_i915_private *dev_priv = data;
2198 	intel_wakeref_t wakeref;
2199 	int ret;
2200 
2201 	if (!CAN_PSR(dev_priv))
2202 		return -ENODEV;
2203 
2204 	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2205 
2206 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2207 
2208 	ret = intel_psr_debug_set(dev_priv, val);
2209 
2210 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2211 
2212 	return ret;
2213 }
2214 
2215 static int
2216 i915_edp_psr_debug_get(void *data, u64 *val)
2217 {
2218 	struct drm_i915_private *dev_priv = data;
2219 
2220 	if (!CAN_PSR(dev_priv))
2221 		return -ENODEV;
2222 
2223 	*val = READ_ONCE(dev_priv->psr.debug);
2224 	return 0;
2225 }
2226 
2227 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2228 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2229 			"%llu\n");
2230 
2231 static int i915_energy_uJ(struct seq_file *m, void *data)
2232 {
2233 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2234 	unsigned long long power;
2235 	intel_wakeref_t wakeref;
2236 	u32 units;
2237 
2238 	if (INTEL_GEN(dev_priv) < 6)
2239 		return -ENODEV;
2240 
2241 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2242 		return -ENODEV;
2243 
2244 	units = (power & 0x1f00) >> 8;
2245 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2246 		power = I915_READ(MCH_SECP_NRG_STTS);
2247 
2248 	power = (1000000 * power) >> units; /* convert to uJ */
2249 	seq_printf(m, "%llu", power);
2250 
2251 	return 0;
2252 }
2253 
2254 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2255 {
2256 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2257 	struct pci_dev *pdev = dev_priv->drm.pdev;
2258 
2259 	if (!HAS_RUNTIME_PM(dev_priv))
2260 		seq_puts(m, "Runtime power management not supported\n");
2261 
2262 	seq_printf(m, "Runtime power status: %s\n",
2263 		   enableddisabled(!dev_priv->power_domains.wakeref));
2264 
2265 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2266 	seq_printf(m, "IRQs disabled: %s\n",
2267 		   yesno(!intel_irqs_enabled(dev_priv)));
2268 #ifdef CONFIG_PM
2269 	seq_printf(m, "Usage count: %d\n",
2270 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2271 #else
2272 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2273 #endif
2274 	seq_printf(m, "PCI device power state: %s [%d]\n",
2275 		   pci_power_name(pdev->current_state),
2276 		   pdev->current_state);
2277 
2278 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2279 		struct drm_printer p = drm_seq_file_printer(m);
2280 
2281 		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
2282 	}
2283 
2284 	return 0;
2285 }
2286 
2287 static int i915_power_domain_info(struct seq_file *m, void *unused)
2288 {
2289 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2290 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2291 	int i;
2292 
2293 	mutex_lock(&power_domains->lock);
2294 
2295 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2296 	for (i = 0; i < power_domains->power_well_count; i++) {
2297 		struct i915_power_well *power_well;
2298 		enum intel_display_power_domain power_domain;
2299 
2300 		power_well = &power_domains->power_wells[i];
2301 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
2302 			   power_well->count);
2303 
2304 		for_each_power_domain(power_domain, power_well->desc->domains)
2305 			seq_printf(m, "  %-23s %d\n",
2306 				 intel_display_power_domain_str(power_domain),
2307 				 power_domains->domain_use_count[power_domain]);
2308 	}
2309 
2310 	mutex_unlock(&power_domains->lock);
2311 
2312 	return 0;
2313 }
2314 
2315 static int i915_dmc_info(struct seq_file *m, void *unused)
2316 {
2317 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2318 	intel_wakeref_t wakeref;
2319 	struct intel_csr *csr;
2320 	i915_reg_t dc5_reg, dc6_reg = {};
2321 
2322 	if (!HAS_CSR(dev_priv))
2323 		return -ENODEV;
2324 
2325 	csr = &dev_priv->csr;
2326 
2327 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2328 
2329 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2330 	seq_printf(m, "path: %s\n", csr->fw_path);
2331 
2332 	if (!csr->dmc_payload)
2333 		goto out;
2334 
2335 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2336 		   CSR_VERSION_MINOR(csr->version));
2337 
2338 	if (INTEL_GEN(dev_priv) >= 12) {
2339 		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
2340 		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
2341 		/*
2342 		 * NOTE: DMC_DEBUG3 is a general purpose reg.
2343 		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
2344 		 * reg for DC3CO debugging and validation,
2345 		 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
2346 		 */
2347 		seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
2348 	} else {
2349 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2350 						 SKL_CSR_DC3_DC5_COUNT;
2351 		if (!IS_GEN9_LP(dev_priv))
2352 			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
2353 	}
2354 
2355 	seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
2356 	if (dc6_reg.reg)
2357 		seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
2358 
2359 out:
2360 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2361 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2362 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2363 
2364 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2365 
2366 	return 0;
2367 }
2368 
2369 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2370 				 struct drm_display_mode *mode)
2371 {
2372 	int i;
2373 
2374 	for (i = 0; i < tabs; i++)
2375 		seq_putc(m, '\t');
2376 
2377 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2378 }
2379 
2380 static void intel_encoder_info(struct seq_file *m,
2381 			       struct intel_crtc *intel_crtc,
2382 			       struct intel_encoder *intel_encoder)
2383 {
2384 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2385 	struct drm_device *dev = &dev_priv->drm;
2386 	struct drm_crtc *crtc = &intel_crtc->base;
2387 	struct intel_connector *intel_connector;
2388 	struct drm_encoder *encoder;
2389 
2390 	encoder = &intel_encoder->base;
2391 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2392 		   encoder->base.id, encoder->name);
2393 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2394 		struct drm_connector *connector = &intel_connector->base;
2395 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2396 			   connector->base.id,
2397 			   connector->name,
2398 			   drm_get_connector_status_name(connector->status));
2399 		if (connector->status == connector_status_connected) {
2400 			struct drm_display_mode *mode = &crtc->mode;
2401 			seq_printf(m, ", mode:\n");
2402 			intel_seq_print_mode(m, 2, mode);
2403 		} else {
2404 			seq_putc(m, '\n');
2405 		}
2406 	}
2407 }
2408 
2409 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2410 {
2411 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2412 	struct drm_device *dev = &dev_priv->drm;
2413 	struct drm_crtc *crtc = &intel_crtc->base;
2414 	struct intel_encoder *intel_encoder;
2415 	struct drm_plane_state *plane_state = crtc->primary->state;
2416 	struct drm_framebuffer *fb = plane_state->fb;
2417 
2418 	if (fb)
2419 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2420 			   fb->base.id, plane_state->src_x >> 16,
2421 			   plane_state->src_y >> 16, fb->width, fb->height);
2422 	else
2423 		seq_puts(m, "\tprimary plane disabled\n");
2424 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2425 		intel_encoder_info(m, intel_crtc, intel_encoder);
2426 }
2427 
2428 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2429 {
2430 	struct drm_display_mode *mode = panel->fixed_mode;
2431 
2432 	seq_printf(m, "\tfixed mode:\n");
2433 	intel_seq_print_mode(m, 2, mode);
2434 }
2435 
2436 static void intel_hdcp_info(struct seq_file *m,
2437 			    struct intel_connector *intel_connector)
2438 {
2439 	bool hdcp_cap, hdcp2_cap;
2440 
2441 	hdcp_cap = intel_hdcp_capable(intel_connector);
2442 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
2443 
2444 	if (hdcp_cap)
2445 		seq_puts(m, "HDCP1.4 ");
2446 	if (hdcp2_cap)
2447 		seq_puts(m, "HDCP2.2 ");
2448 
2449 	if (!hdcp_cap && !hdcp2_cap)
2450 		seq_puts(m, "None");
2451 
2452 	seq_puts(m, "\n");
2453 }
2454 
2455 static void intel_dp_info(struct seq_file *m,
2456 			  struct intel_connector *intel_connector)
2457 {
2458 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2459 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2460 
2461 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2462 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2463 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2464 		intel_panel_info(m, &intel_connector->panel);
2465 
2466 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2467 				&intel_dp->aux);
2468 	if (intel_connector->hdcp.shim) {
2469 		seq_puts(m, "\tHDCP version: ");
2470 		intel_hdcp_info(m, intel_connector);
2471 	}
2472 }
2473 
2474 static void intel_dp_mst_info(struct seq_file *m,
2475 			  struct intel_connector *intel_connector)
2476 {
2477 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2478 	struct intel_dp_mst_encoder *intel_mst =
2479 		enc_to_mst(&intel_encoder->base);
2480 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
2481 	struct intel_dp *intel_dp = &intel_dig_port->dp;
2482 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2483 					intel_connector->port);
2484 
2485 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2486 }
2487 
2488 static void intel_hdmi_info(struct seq_file *m,
2489 			    struct intel_connector *intel_connector)
2490 {
2491 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2492 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2493 
2494 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2495 	if (intel_connector->hdcp.shim) {
2496 		seq_puts(m, "\tHDCP version: ");
2497 		intel_hdcp_info(m, intel_connector);
2498 	}
2499 }
2500 
2501 static void intel_lvds_info(struct seq_file *m,
2502 			    struct intel_connector *intel_connector)
2503 {
2504 	intel_panel_info(m, &intel_connector->panel);
2505 }
2506 
2507 static void intel_connector_info(struct seq_file *m,
2508 				 struct drm_connector *connector)
2509 {
2510 	struct intel_connector *intel_connector = to_intel_connector(connector);
2511 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2512 	struct drm_display_mode *mode;
2513 
2514 	seq_printf(m, "connector %d: type %s, status: %s\n",
2515 		   connector->base.id, connector->name,
2516 		   drm_get_connector_status_name(connector->status));
2517 
2518 	if (connector->status == connector_status_disconnected)
2519 		return;
2520 
2521 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2522 		   connector->display_info.width_mm,
2523 		   connector->display_info.height_mm);
2524 	seq_printf(m, "\tsubpixel order: %s\n",
2525 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2526 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2527 
2528 	if (!intel_encoder)
2529 		return;
2530 
2531 	switch (connector->connector_type) {
2532 	case DRM_MODE_CONNECTOR_DisplayPort:
2533 	case DRM_MODE_CONNECTOR_eDP:
2534 		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2535 			intel_dp_mst_info(m, intel_connector);
2536 		else
2537 			intel_dp_info(m, intel_connector);
2538 		break;
2539 	case DRM_MODE_CONNECTOR_LVDS:
2540 		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2541 			intel_lvds_info(m, intel_connector);
2542 		break;
2543 	case DRM_MODE_CONNECTOR_HDMIA:
2544 		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2545 		    intel_encoder->type == INTEL_OUTPUT_DDI)
2546 			intel_hdmi_info(m, intel_connector);
2547 		break;
2548 	default:
2549 		break;
2550 	}
2551 
2552 	seq_printf(m, "\tmodes:\n");
2553 	list_for_each_entry(mode, &connector->modes, head)
2554 		intel_seq_print_mode(m, 2, mode);
2555 }
2556 
2557 static const char *plane_type(enum drm_plane_type type)
2558 {
2559 	switch (type) {
2560 	case DRM_PLANE_TYPE_OVERLAY:
2561 		return "OVL";
2562 	case DRM_PLANE_TYPE_PRIMARY:
2563 		return "PRI";
2564 	case DRM_PLANE_TYPE_CURSOR:
2565 		return "CUR";
2566 	/*
2567 	 * Deliberately omitting default: to generate compiler warnings
2568 	 * when a new drm_plane_type gets added.
2569 	 */
2570 	}
2571 
2572 	return "unknown";
2573 }
2574 
2575 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2576 {
2577 	/*
2578 	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
2579 	 * will print them all to visualize if the values are misused
2580 	 */
2581 	snprintf(buf, bufsize,
2582 		 "%s%s%s%s%s%s(0x%08x)",
2583 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2584 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2585 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2586 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2587 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2588 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2589 		 rotation);
2590 }
2591 
2592 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2593 {
2594 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2595 	struct drm_device *dev = &dev_priv->drm;
2596 	struct intel_plane *intel_plane;
2597 
2598 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2599 		struct drm_plane_state *state;
2600 		struct drm_plane *plane = &intel_plane->base;
2601 		struct drm_format_name_buf format_name;
2602 		char rot_str[48];
2603 
2604 		if (!plane->state) {
2605 			seq_puts(m, "plane->state is NULL!\n");
2606 			continue;
2607 		}
2608 
2609 		state = plane->state;
2610 
2611 		if (state->fb) {
2612 			drm_get_format_name(state->fb->format->format,
2613 					    &format_name);
2614 		} else {
2615 			sprintf(format_name.str, "N/A");
2616 		}
2617 
2618 		plane_rotation(rot_str, sizeof(rot_str), state->rotation);
2619 
2620 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2621 			   plane->base.id,
2622 			   plane_type(intel_plane->base.type),
2623 			   state->crtc_x, state->crtc_y,
2624 			   state->crtc_w, state->crtc_h,
2625 			   (state->src_x >> 16),
2626 			   ((state->src_x & 0xffff) * 15625) >> 10,
2627 			   (state->src_y >> 16),
2628 			   ((state->src_y & 0xffff) * 15625) >> 10,
2629 			   (state->src_w >> 16),
2630 			   ((state->src_w & 0xffff) * 15625) >> 10,
2631 			   (state->src_h >> 16),
2632 			   ((state->src_h & 0xffff) * 15625) >> 10,
2633 			   format_name.str,
2634 			   rot_str);
2635 	}
2636 }
2637 
2638 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2639 {
2640 	struct intel_crtc_state *pipe_config;
2641 	int num_scalers = intel_crtc->num_scalers;
2642 	int i;
2643 
2644 	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2645 
2646 	/* Not all platformas have a scaler */
2647 	if (num_scalers) {
2648 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2649 			   num_scalers,
2650 			   pipe_config->scaler_state.scaler_users,
2651 			   pipe_config->scaler_state.scaler_id);
2652 
2653 		for (i = 0; i < num_scalers; i++) {
2654 			struct intel_scaler *sc =
2655 					&pipe_config->scaler_state.scalers[i];
2656 
2657 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2658 				   i, yesno(sc->in_use), sc->mode);
2659 		}
2660 		seq_puts(m, "\n");
2661 	} else {
2662 		seq_puts(m, "\tNo scalers available on this platform\n");
2663 	}
2664 }
2665 
2666 static int i915_display_info(struct seq_file *m, void *unused)
2667 {
2668 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2669 	struct drm_device *dev = &dev_priv->drm;
2670 	struct intel_crtc *crtc;
2671 	struct drm_connector *connector;
2672 	struct drm_connector_list_iter conn_iter;
2673 	intel_wakeref_t wakeref;
2674 
2675 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2676 
2677 	seq_printf(m, "CRTC info\n");
2678 	seq_printf(m, "---------\n");
2679 	for_each_intel_crtc(dev, crtc) {
2680 		struct intel_crtc_state *pipe_config;
2681 
2682 		drm_modeset_lock(&crtc->base.mutex, NULL);
2683 		pipe_config = to_intel_crtc_state(crtc->base.state);
2684 
2685 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
2686 			   crtc->base.base.id, pipe_name(crtc->pipe),
2687 			   yesno(pipe_config->base.active),
2688 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
2689 			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
2690 
2691 		if (pipe_config->base.active) {
2692 			struct intel_plane *cursor =
2693 				to_intel_plane(crtc->base.cursor);
2694 
2695 			intel_crtc_info(m, crtc);
2696 
2697 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
2698 				   yesno(cursor->base.state->visible),
2699 				   cursor->base.state->crtc_x,
2700 				   cursor->base.state->crtc_y,
2701 				   cursor->base.state->crtc_w,
2702 				   cursor->base.state->crtc_h,
2703 				   cursor->cursor.base);
2704 			intel_scaler_info(m, crtc);
2705 			intel_plane_info(m, crtc);
2706 		}
2707 
2708 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2709 			   yesno(!crtc->cpu_fifo_underrun_disabled),
2710 			   yesno(!crtc->pch_fifo_underrun_disabled));
2711 		drm_modeset_unlock(&crtc->base.mutex);
2712 	}
2713 
2714 	seq_printf(m, "\n");
2715 	seq_printf(m, "Connector info\n");
2716 	seq_printf(m, "--------------\n");
2717 	mutex_lock(&dev->mode_config.mutex);
2718 	drm_connector_list_iter_begin(dev, &conn_iter);
2719 	drm_for_each_connector_iter(connector, &conn_iter)
2720 		intel_connector_info(m, connector);
2721 	drm_connector_list_iter_end(&conn_iter);
2722 	mutex_unlock(&dev->mode_config.mutex);
2723 
2724 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2725 
2726 	return 0;
2727 }
2728 
2729 static int i915_engine_info(struct seq_file *m, void *unused)
2730 {
2731 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2732 	struct intel_engine_cs *engine;
2733 	intel_wakeref_t wakeref;
2734 	struct drm_printer p;
2735 
2736 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2737 
2738 	seq_printf(m, "GT awake? %s [%d]\n",
2739 		   yesno(dev_priv->gt.awake),
2740 		   atomic_read(&dev_priv->gt.wakeref.count));
2741 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
2742 		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
2743 
2744 	p = drm_seq_file_printer(m);
2745 	for_each_uabi_engine(engine, dev_priv)
2746 		intel_engine_dump(engine, &p, "%s\n", engine->name);
2747 
2748 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2749 
2750 	return 0;
2751 }
2752 
2753 static int i915_rcs_topology(struct seq_file *m, void *unused)
2754 {
2755 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2756 	struct drm_printer p = drm_seq_file_printer(m);
2757 
2758 	intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
2759 
2760 	return 0;
2761 }
2762 
2763 static int i915_shrinker_info(struct seq_file *m, void *unused)
2764 {
2765 	struct drm_i915_private *i915 = node_to_i915(m->private);
2766 
2767 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
2768 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
2769 
2770 	return 0;
2771 }
2772 
2773 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2774 {
2775 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2776 	struct drm_device *dev = &dev_priv->drm;
2777 	int i;
2778 
2779 	drm_modeset_lock_all(dev);
2780 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2781 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2782 
2783 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
2784 			   pll->info->id);
2785 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2786 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
2787 		seq_printf(m, " tracked hardware state:\n");
2788 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
2789 		seq_printf(m, " dpll_md: 0x%08x\n",
2790 			   pll->state.hw_state.dpll_md);
2791 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
2792 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
2793 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
2794 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
2795 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
2796 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
2797 			   pll->state.hw_state.mg_refclkin_ctl);
2798 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2799 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
2800 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
2801 			   pll->state.hw_state.mg_clktop2_hsclkctl);
2802 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
2803 			   pll->state.hw_state.mg_pll_div0);
2804 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
2805 			   pll->state.hw_state.mg_pll_div1);
2806 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
2807 			   pll->state.hw_state.mg_pll_lf);
2808 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2809 			   pll->state.hw_state.mg_pll_frac_lock);
2810 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
2811 			   pll->state.hw_state.mg_pll_ssc);
2812 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
2813 			   pll->state.hw_state.mg_pll_bias);
2814 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2815 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
2816 	}
2817 	drm_modeset_unlock_all(dev);
2818 
2819 	return 0;
2820 }
2821 
2822 static int i915_wa_registers(struct seq_file *m, void *unused)
2823 {
2824 	struct drm_i915_private *i915 = node_to_i915(m->private);
2825 	struct intel_engine_cs *engine;
2826 
2827 	for_each_uabi_engine(engine, i915) {
2828 		const struct i915_wa_list *wal = &engine->ctx_wa_list;
2829 		const struct i915_wa *wa;
2830 		unsigned int count;
2831 
2832 		count = wal->count;
2833 		if (!count)
2834 			continue;
2835 
2836 		seq_printf(m, "%s: Workarounds applied: %u\n",
2837 			   engine->name, count);
2838 
2839 		for (wa = wal->list; count--; wa++)
2840 			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2841 				   i915_mmio_reg_offset(wa->reg),
2842 				   wa->val, wa->mask);
2843 
2844 		seq_printf(m, "\n");
2845 	}
2846 
2847 	return 0;
2848 }
2849 
2850 static int i915_ipc_status_show(struct seq_file *m, void *data)
2851 {
2852 	struct drm_i915_private *dev_priv = m->private;
2853 
2854 	seq_printf(m, "Isochronous Priority Control: %s\n",
2855 			yesno(dev_priv->ipc_enabled));
2856 	return 0;
2857 }
2858 
2859 static int i915_ipc_status_open(struct inode *inode, struct file *file)
2860 {
2861 	struct drm_i915_private *dev_priv = inode->i_private;
2862 
2863 	if (!HAS_IPC(dev_priv))
2864 		return -ENODEV;
2865 
2866 	return single_open(file, i915_ipc_status_show, dev_priv);
2867 }
2868 
2869 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
2870 				     size_t len, loff_t *offp)
2871 {
2872 	struct seq_file *m = file->private_data;
2873 	struct drm_i915_private *dev_priv = m->private;
2874 	intel_wakeref_t wakeref;
2875 	bool enable;
2876 	int ret;
2877 
2878 	ret = kstrtobool_from_user(ubuf, len, &enable);
2879 	if (ret < 0)
2880 		return ret;
2881 
2882 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2883 		if (!dev_priv->ipc_enabled && enable)
2884 			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
2885 		dev_priv->wm.distrust_bios_wm = true;
2886 		dev_priv->ipc_enabled = enable;
2887 		intel_enable_ipc(dev_priv);
2888 	}
2889 
2890 	return len;
2891 }
2892 
2893 static const struct file_operations i915_ipc_status_fops = {
2894 	.owner = THIS_MODULE,
2895 	.open = i915_ipc_status_open,
2896 	.read = seq_read,
2897 	.llseek = seq_lseek,
2898 	.release = single_release,
2899 	.write = i915_ipc_status_write
2900 };
2901 
2902 static int i915_ddb_info(struct seq_file *m, void *unused)
2903 {
2904 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2905 	struct drm_device *dev = &dev_priv->drm;
2906 	struct skl_ddb_entry *entry;
2907 	struct intel_crtc *crtc;
2908 
2909 	if (INTEL_GEN(dev_priv) < 9)
2910 		return -ENODEV;
2911 
2912 	drm_modeset_lock_all(dev);
2913 
2914 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2915 
2916 	for_each_intel_crtc(&dev_priv->drm, crtc) {
2917 		struct intel_crtc_state *crtc_state =
2918 			to_intel_crtc_state(crtc->base.state);
2919 		enum pipe pipe = crtc->pipe;
2920 		enum plane_id plane_id;
2921 
2922 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2923 
2924 		for_each_plane_id_on_crtc(crtc, plane_id) {
2925 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
2926 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
2927 				   entry->start, entry->end,
2928 				   skl_ddb_entry_size(entry));
2929 		}
2930 
2931 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
2932 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
2933 			   entry->end, skl_ddb_entry_size(entry));
2934 	}
2935 
2936 	drm_modeset_unlock_all(dev);
2937 
2938 	return 0;
2939 }
2940 
2941 static void drrs_status_per_crtc(struct seq_file *m,
2942 				 struct drm_device *dev,
2943 				 struct intel_crtc *intel_crtc)
2944 {
2945 	struct drm_i915_private *dev_priv = to_i915(dev);
2946 	struct i915_drrs *drrs = &dev_priv->drrs;
2947 	int vrefresh = 0;
2948 	struct drm_connector *connector;
2949 	struct drm_connector_list_iter conn_iter;
2950 
2951 	drm_connector_list_iter_begin(dev, &conn_iter);
2952 	drm_for_each_connector_iter(connector, &conn_iter) {
2953 		if (connector->state->crtc != &intel_crtc->base)
2954 			continue;
2955 
2956 		seq_printf(m, "%s:\n", connector->name);
2957 	}
2958 	drm_connector_list_iter_end(&conn_iter);
2959 
2960 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
2961 		seq_puts(m, "\tVBT: DRRS_type: Static");
2962 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
2963 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
2964 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
2965 		seq_puts(m, "\tVBT: DRRS_type: None");
2966 	else
2967 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
2968 
2969 	seq_puts(m, "\n\n");
2970 
2971 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
2972 		struct intel_panel *panel;
2973 
2974 		mutex_lock(&drrs->mutex);
2975 		/* DRRS Supported */
2976 		seq_puts(m, "\tDRRS Supported: Yes\n");
2977 
2978 		/* disable_drrs() will make drrs->dp NULL */
2979 		if (!drrs->dp) {
2980 			seq_puts(m, "Idleness DRRS: Disabled\n");
2981 			if (dev_priv->psr.enabled)
2982 				seq_puts(m,
2983 				"\tAs PSR is enabled, DRRS is not enabled\n");
2984 			mutex_unlock(&drrs->mutex);
2985 			return;
2986 		}
2987 
2988 		panel = &drrs->dp->attached_connector->panel;
2989 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
2990 					drrs->busy_frontbuffer_bits);
2991 
2992 		seq_puts(m, "\n\t\t");
2993 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
2994 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
2995 			vrefresh = panel->fixed_mode->vrefresh;
2996 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
2997 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
2998 			vrefresh = panel->downclock_mode->vrefresh;
2999 		} else {
3000 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3001 						drrs->refresh_rate_type);
3002 			mutex_unlock(&drrs->mutex);
3003 			return;
3004 		}
3005 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3006 
3007 		seq_puts(m, "\n\t\t");
3008 		mutex_unlock(&drrs->mutex);
3009 	} else {
3010 		/* DRRS not supported. Print the VBT parameter*/
3011 		seq_puts(m, "\tDRRS Supported : No");
3012 	}
3013 	seq_puts(m, "\n");
3014 }
3015 
3016 static int i915_drrs_status(struct seq_file *m, void *unused)
3017 {
3018 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3019 	struct drm_device *dev = &dev_priv->drm;
3020 	struct intel_crtc *intel_crtc;
3021 	int active_crtc_cnt = 0;
3022 
3023 	drm_modeset_lock_all(dev);
3024 	for_each_intel_crtc(dev, intel_crtc) {
3025 		if (intel_crtc->base.state->active) {
3026 			active_crtc_cnt++;
3027 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3028 
3029 			drrs_status_per_crtc(m, dev, intel_crtc);
3030 		}
3031 	}
3032 	drm_modeset_unlock_all(dev);
3033 
3034 	if (!active_crtc_cnt)
3035 		seq_puts(m, "No active crtc found\n");
3036 
3037 	return 0;
3038 }
3039 
3040 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3041 {
3042 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3043 	struct drm_device *dev = &dev_priv->drm;
3044 	struct intel_encoder *intel_encoder;
3045 	struct intel_digital_port *intel_dig_port;
3046 	struct drm_connector *connector;
3047 	struct drm_connector_list_iter conn_iter;
3048 
3049 	drm_connector_list_iter_begin(dev, &conn_iter);
3050 	drm_for_each_connector_iter(connector, &conn_iter) {
3051 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3052 			continue;
3053 
3054 		intel_encoder = intel_attached_encoder(connector);
3055 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3056 			continue;
3057 
3058 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3059 		if (!intel_dig_port->dp.can_mst)
3060 			continue;
3061 
3062 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
3063 			   intel_dig_port->base.base.base.id,
3064 			   intel_dig_port->base.base.name);
3065 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3066 	}
3067 	drm_connector_list_iter_end(&conn_iter);
3068 
3069 	return 0;
3070 }
3071 
3072 static ssize_t i915_displayport_test_active_write(struct file *file,
3073 						  const char __user *ubuf,
3074 						  size_t len, loff_t *offp)
3075 {
3076 	char *input_buffer;
3077 	int status = 0;
3078 	struct drm_device *dev;
3079 	struct drm_connector *connector;
3080 	struct drm_connector_list_iter conn_iter;
3081 	struct intel_dp *intel_dp;
3082 	int val = 0;
3083 
3084 	dev = ((struct seq_file *)file->private_data)->private;
3085 
3086 	if (len == 0)
3087 		return 0;
3088 
3089 	input_buffer = memdup_user_nul(ubuf, len);
3090 	if (IS_ERR(input_buffer))
3091 		return PTR_ERR(input_buffer);
3092 
3093 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3094 
3095 	drm_connector_list_iter_begin(dev, &conn_iter);
3096 	drm_for_each_connector_iter(connector, &conn_iter) {
3097 		struct intel_encoder *encoder;
3098 
3099 		if (connector->connector_type !=
3100 		    DRM_MODE_CONNECTOR_DisplayPort)
3101 			continue;
3102 
3103 		encoder = to_intel_encoder(connector->encoder);
3104 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3105 			continue;
3106 
3107 		if (encoder && connector->status == connector_status_connected) {
3108 			intel_dp = enc_to_intel_dp(&encoder->base);
3109 			status = kstrtoint(input_buffer, 10, &val);
3110 			if (status < 0)
3111 				break;
3112 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3113 			/* To prevent erroneous activation of the compliance
3114 			 * testing code, only accept an actual value of 1 here
3115 			 */
3116 			if (val == 1)
3117 				intel_dp->compliance.test_active = 1;
3118 			else
3119 				intel_dp->compliance.test_active = 0;
3120 		}
3121 	}
3122 	drm_connector_list_iter_end(&conn_iter);
3123 	kfree(input_buffer);
3124 	if (status < 0)
3125 		return status;
3126 
3127 	*offp += len;
3128 	return len;
3129 }
3130 
3131 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3132 {
3133 	struct drm_i915_private *dev_priv = m->private;
3134 	struct drm_device *dev = &dev_priv->drm;
3135 	struct drm_connector *connector;
3136 	struct drm_connector_list_iter conn_iter;
3137 	struct intel_dp *intel_dp;
3138 
3139 	drm_connector_list_iter_begin(dev, &conn_iter);
3140 	drm_for_each_connector_iter(connector, &conn_iter) {
3141 		struct intel_encoder *encoder;
3142 
3143 		if (connector->connector_type !=
3144 		    DRM_MODE_CONNECTOR_DisplayPort)
3145 			continue;
3146 
3147 		encoder = to_intel_encoder(connector->encoder);
3148 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3149 			continue;
3150 
3151 		if (encoder && connector->status == connector_status_connected) {
3152 			intel_dp = enc_to_intel_dp(&encoder->base);
3153 			if (intel_dp->compliance.test_active)
3154 				seq_puts(m, "1");
3155 			else
3156 				seq_puts(m, "0");
3157 		} else
3158 			seq_puts(m, "0");
3159 	}
3160 	drm_connector_list_iter_end(&conn_iter);
3161 
3162 	return 0;
3163 }
3164 
3165 static int i915_displayport_test_active_open(struct inode *inode,
3166 					     struct file *file)
3167 {
3168 	return single_open(file, i915_displayport_test_active_show,
3169 			   inode->i_private);
3170 }
3171 
3172 static const struct file_operations i915_displayport_test_active_fops = {
3173 	.owner = THIS_MODULE,
3174 	.open = i915_displayport_test_active_open,
3175 	.read = seq_read,
3176 	.llseek = seq_lseek,
3177 	.release = single_release,
3178 	.write = i915_displayport_test_active_write
3179 };
3180 
3181 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3182 {
3183 	struct drm_i915_private *dev_priv = m->private;
3184 	struct drm_device *dev = &dev_priv->drm;
3185 	struct drm_connector *connector;
3186 	struct drm_connector_list_iter conn_iter;
3187 	struct intel_dp *intel_dp;
3188 
3189 	drm_connector_list_iter_begin(dev, &conn_iter);
3190 	drm_for_each_connector_iter(connector, &conn_iter) {
3191 		struct intel_encoder *encoder;
3192 
3193 		if (connector->connector_type !=
3194 		    DRM_MODE_CONNECTOR_DisplayPort)
3195 			continue;
3196 
3197 		encoder = to_intel_encoder(connector->encoder);
3198 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3199 			continue;
3200 
3201 		if (encoder && connector->status == connector_status_connected) {
3202 			intel_dp = enc_to_intel_dp(&encoder->base);
3203 			if (intel_dp->compliance.test_type ==
3204 			    DP_TEST_LINK_EDID_READ)
3205 				seq_printf(m, "%lx",
3206 					   intel_dp->compliance.test_data.edid);
3207 			else if (intel_dp->compliance.test_type ==
3208 				 DP_TEST_LINK_VIDEO_PATTERN) {
3209 				seq_printf(m, "hdisplay: %d\n",
3210 					   intel_dp->compliance.test_data.hdisplay);
3211 				seq_printf(m, "vdisplay: %d\n",
3212 					   intel_dp->compliance.test_data.vdisplay);
3213 				seq_printf(m, "bpc: %u\n",
3214 					   intel_dp->compliance.test_data.bpc);
3215 			}
3216 		} else
3217 			seq_puts(m, "0");
3218 	}
3219 	drm_connector_list_iter_end(&conn_iter);
3220 
3221 	return 0;
3222 }
3223 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3224 
3225 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3226 {
3227 	struct drm_i915_private *dev_priv = m->private;
3228 	struct drm_device *dev = &dev_priv->drm;
3229 	struct drm_connector *connector;
3230 	struct drm_connector_list_iter conn_iter;
3231 	struct intel_dp *intel_dp;
3232 
3233 	drm_connector_list_iter_begin(dev, &conn_iter);
3234 	drm_for_each_connector_iter(connector, &conn_iter) {
3235 		struct intel_encoder *encoder;
3236 
3237 		if (connector->connector_type !=
3238 		    DRM_MODE_CONNECTOR_DisplayPort)
3239 			continue;
3240 
3241 		encoder = to_intel_encoder(connector->encoder);
3242 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3243 			continue;
3244 
3245 		if (encoder && connector->status == connector_status_connected) {
3246 			intel_dp = enc_to_intel_dp(&encoder->base);
3247 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3248 		} else
3249 			seq_puts(m, "0");
3250 	}
3251 	drm_connector_list_iter_end(&conn_iter);
3252 
3253 	return 0;
3254 }
3255 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3256 
3257 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3258 {
3259 	struct drm_i915_private *dev_priv = m->private;
3260 	struct drm_device *dev = &dev_priv->drm;
3261 	int level;
3262 	int num_levels;
3263 
3264 	if (IS_CHERRYVIEW(dev_priv))
3265 		num_levels = 3;
3266 	else if (IS_VALLEYVIEW(dev_priv))
3267 		num_levels = 1;
3268 	else if (IS_G4X(dev_priv))
3269 		num_levels = 3;
3270 	else
3271 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3272 
3273 	drm_modeset_lock_all(dev);
3274 
3275 	for (level = 0; level < num_levels; level++) {
3276 		unsigned int latency = wm[level];
3277 
3278 		/*
3279 		 * - WM1+ latency values in 0.5us units
3280 		 * - latencies are in us on gen9/vlv/chv
3281 		 */
3282 		if (INTEL_GEN(dev_priv) >= 9 ||
3283 		    IS_VALLEYVIEW(dev_priv) ||
3284 		    IS_CHERRYVIEW(dev_priv) ||
3285 		    IS_G4X(dev_priv))
3286 			latency *= 10;
3287 		else if (level > 0)
3288 			latency *= 5;
3289 
3290 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3291 			   level, wm[level], latency / 10, latency % 10);
3292 	}
3293 
3294 	drm_modeset_unlock_all(dev);
3295 }
3296 
3297 static int pri_wm_latency_show(struct seq_file *m, void *data)
3298 {
3299 	struct drm_i915_private *dev_priv = m->private;
3300 	const u16 *latencies;
3301 
3302 	if (INTEL_GEN(dev_priv) >= 9)
3303 		latencies = dev_priv->wm.skl_latency;
3304 	else
3305 		latencies = dev_priv->wm.pri_latency;
3306 
3307 	wm_latency_show(m, latencies);
3308 
3309 	return 0;
3310 }
3311 
3312 static int spr_wm_latency_show(struct seq_file *m, void *data)
3313 {
3314 	struct drm_i915_private *dev_priv = m->private;
3315 	const u16 *latencies;
3316 
3317 	if (INTEL_GEN(dev_priv) >= 9)
3318 		latencies = dev_priv->wm.skl_latency;
3319 	else
3320 		latencies = dev_priv->wm.spr_latency;
3321 
3322 	wm_latency_show(m, latencies);
3323 
3324 	return 0;
3325 }
3326 
3327 static int cur_wm_latency_show(struct seq_file *m, void *data)
3328 {
3329 	struct drm_i915_private *dev_priv = m->private;
3330 	const u16 *latencies;
3331 
3332 	if (INTEL_GEN(dev_priv) >= 9)
3333 		latencies = dev_priv->wm.skl_latency;
3334 	else
3335 		latencies = dev_priv->wm.cur_latency;
3336 
3337 	wm_latency_show(m, latencies);
3338 
3339 	return 0;
3340 }
3341 
3342 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3343 {
3344 	struct drm_i915_private *dev_priv = inode->i_private;
3345 
3346 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3347 		return -ENODEV;
3348 
3349 	return single_open(file, pri_wm_latency_show, dev_priv);
3350 }
3351 
3352 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3353 {
3354 	struct drm_i915_private *dev_priv = inode->i_private;
3355 
3356 	if (HAS_GMCH(dev_priv))
3357 		return -ENODEV;
3358 
3359 	return single_open(file, spr_wm_latency_show, dev_priv);
3360 }
3361 
3362 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3363 {
3364 	struct drm_i915_private *dev_priv = inode->i_private;
3365 
3366 	if (HAS_GMCH(dev_priv))
3367 		return -ENODEV;
3368 
3369 	return single_open(file, cur_wm_latency_show, dev_priv);
3370 }
3371 
3372 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3373 				size_t len, loff_t *offp, u16 wm[8])
3374 {
3375 	struct seq_file *m = file->private_data;
3376 	struct drm_i915_private *dev_priv = m->private;
3377 	struct drm_device *dev = &dev_priv->drm;
3378 	u16 new[8] = { 0 };
3379 	int num_levels;
3380 	int level;
3381 	int ret;
3382 	char tmp[32];
3383 
3384 	if (IS_CHERRYVIEW(dev_priv))
3385 		num_levels = 3;
3386 	else if (IS_VALLEYVIEW(dev_priv))
3387 		num_levels = 1;
3388 	else if (IS_G4X(dev_priv))
3389 		num_levels = 3;
3390 	else
3391 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3392 
3393 	if (len >= sizeof(tmp))
3394 		return -EINVAL;
3395 
3396 	if (copy_from_user(tmp, ubuf, len))
3397 		return -EFAULT;
3398 
3399 	tmp[len] = '\0';
3400 
3401 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3402 		     &new[0], &new[1], &new[2], &new[3],
3403 		     &new[4], &new[5], &new[6], &new[7]);
3404 	if (ret != num_levels)
3405 		return -EINVAL;
3406 
3407 	drm_modeset_lock_all(dev);
3408 
3409 	for (level = 0; level < num_levels; level++)
3410 		wm[level] = new[level];
3411 
3412 	drm_modeset_unlock_all(dev);
3413 
3414 	return len;
3415 }
3416 
3417 
3418 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3419 				    size_t len, loff_t *offp)
3420 {
3421 	struct seq_file *m = file->private_data;
3422 	struct drm_i915_private *dev_priv = m->private;
3423 	u16 *latencies;
3424 
3425 	if (INTEL_GEN(dev_priv) >= 9)
3426 		latencies = dev_priv->wm.skl_latency;
3427 	else
3428 		latencies = dev_priv->wm.pri_latency;
3429 
3430 	return wm_latency_write(file, ubuf, len, offp, latencies);
3431 }
3432 
3433 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3434 				    size_t len, loff_t *offp)
3435 {
3436 	struct seq_file *m = file->private_data;
3437 	struct drm_i915_private *dev_priv = m->private;
3438 	u16 *latencies;
3439 
3440 	if (INTEL_GEN(dev_priv) >= 9)
3441 		latencies = dev_priv->wm.skl_latency;
3442 	else
3443 		latencies = dev_priv->wm.spr_latency;
3444 
3445 	return wm_latency_write(file, ubuf, len, offp, latencies);
3446 }
3447 
3448 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3449 				    size_t len, loff_t *offp)
3450 {
3451 	struct seq_file *m = file->private_data;
3452 	struct drm_i915_private *dev_priv = m->private;
3453 	u16 *latencies;
3454 
3455 	if (INTEL_GEN(dev_priv) >= 9)
3456 		latencies = dev_priv->wm.skl_latency;
3457 	else
3458 		latencies = dev_priv->wm.cur_latency;
3459 
3460 	return wm_latency_write(file, ubuf, len, offp, latencies);
3461 }
3462 
3463 static const struct file_operations i915_pri_wm_latency_fops = {
3464 	.owner = THIS_MODULE,
3465 	.open = pri_wm_latency_open,
3466 	.read = seq_read,
3467 	.llseek = seq_lseek,
3468 	.release = single_release,
3469 	.write = pri_wm_latency_write
3470 };
3471 
3472 static const struct file_operations i915_spr_wm_latency_fops = {
3473 	.owner = THIS_MODULE,
3474 	.open = spr_wm_latency_open,
3475 	.read = seq_read,
3476 	.llseek = seq_lseek,
3477 	.release = single_release,
3478 	.write = spr_wm_latency_write
3479 };
3480 
3481 static const struct file_operations i915_cur_wm_latency_fops = {
3482 	.owner = THIS_MODULE,
3483 	.open = cur_wm_latency_open,
3484 	.read = seq_read,
3485 	.llseek = seq_lseek,
3486 	.release = single_release,
3487 	.write = cur_wm_latency_write
3488 };
3489 
3490 static int
3491 i915_wedged_get(void *data, u64 *val)
3492 {
3493 	struct drm_i915_private *i915 = data;
3494 	int ret = intel_gt_terminally_wedged(&i915->gt);
3495 
3496 	switch (ret) {
3497 	case -EIO:
3498 		*val = 1;
3499 		return 0;
3500 	case 0:
3501 		*val = 0;
3502 		return 0;
3503 	default:
3504 		return ret;
3505 	}
3506 }
3507 
3508 static int
3509 i915_wedged_set(void *data, u64 val)
3510 {
3511 	struct drm_i915_private *i915 = data;
3512 
3513 	/* Flush any previous reset before applying for a new one */
3514 	wait_event(i915->gt.reset.queue,
3515 		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
3516 
3517 	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
3518 			      "Manually set wedged engine mask = %llx", val);
3519 	return 0;
3520 }
3521 
3522 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3523 			i915_wedged_get, i915_wedged_set,
3524 			"%llu\n");
3525 
3526 static int
3527 i915_perf_noa_delay_set(void *data, u64 val)
3528 {
3529 	struct drm_i915_private *i915 = data;
3530 	const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;
3531 
3532 	/*
3533 	 * This would lead to infinite waits as we're doing timestamp
3534 	 * difference on the CS with only 32bits.
3535 	 */
3536 	if (val > mul_u32_u32(U32_MAX, clk))
3537 		return -EINVAL;
3538 
3539 	atomic64_set(&i915->perf.noa_programming_delay, val);
3540 	return 0;
3541 }
3542 
3543 static int
3544 i915_perf_noa_delay_get(void *data, u64 *val)
3545 {
3546 	struct drm_i915_private *i915 = data;
3547 
3548 	*val = atomic64_read(&i915->perf.noa_programming_delay);
3549 	return 0;
3550 }
3551 
3552 DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
3553 			i915_perf_noa_delay_get,
3554 			i915_perf_noa_delay_set,
3555 			"%llu\n");
3556 
3557 #define DROP_UNBOUND	BIT(0)
3558 #define DROP_BOUND	BIT(1)
3559 #define DROP_RETIRE	BIT(2)
3560 #define DROP_ACTIVE	BIT(3)
3561 #define DROP_FREED	BIT(4)
3562 #define DROP_SHRINK_ALL	BIT(5)
3563 #define DROP_IDLE	BIT(6)
3564 #define DROP_RESET_ACTIVE	BIT(7)
3565 #define DROP_RESET_SEQNO	BIT(8)
3566 #define DROP_RCU	BIT(9)
3567 #define DROP_ALL (DROP_UNBOUND	| \
3568 		  DROP_BOUND	| \
3569 		  DROP_RETIRE	| \
3570 		  DROP_ACTIVE	| \
3571 		  DROP_FREED	| \
3572 		  DROP_SHRINK_ALL |\
3573 		  DROP_IDLE	| \
3574 		  DROP_RESET_ACTIVE | \
3575 		  DROP_RESET_SEQNO | \
3576 		  DROP_RCU)
3577 static int
3578 i915_drop_caches_get(void *data, u64 *val)
3579 {
3580 	*val = DROP_ALL;
3581 
3582 	return 0;
3583 }
3584 static int
3585 gt_drop_caches(struct intel_gt *gt, u64 val)
3586 {
3587 	int ret;
3588 
3589 	if (val & DROP_RESET_ACTIVE &&
3590 	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
3591 		intel_gt_set_wedged(gt);
3592 
3593 	if (val & DROP_RETIRE)
3594 		intel_gt_retire_requests(gt);
3595 
3596 	if (val & (DROP_IDLE | DROP_ACTIVE)) {
3597 		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
3598 		if (ret)
3599 			return ret;
3600 	}
3601 
3602 	if (val & DROP_IDLE) {
3603 		ret = intel_gt_pm_wait_for_idle(gt);
3604 		if (ret)
3605 			return ret;
3606 	}
3607 
3608 	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
3609 		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);
3610 
3611 	return 0;
3612 }
3613 
3614 static int
3615 i915_drop_caches_set(void *data, u64 val)
3616 {
3617 	struct drm_i915_private *i915 = data;
3618 	int ret;
3619 
3620 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3621 		  val, val & DROP_ALL);
3622 
3623 	ret = gt_drop_caches(&i915->gt, val);
3624 	if (ret)
3625 		return ret;
3626 
3627 	fs_reclaim_acquire(GFP_KERNEL);
3628 	if (val & DROP_BOUND)
3629 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
3630 
3631 	if (val & DROP_UNBOUND)
3632 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
3633 
3634 	if (val & DROP_SHRINK_ALL)
3635 		i915_gem_shrink_all(i915);
3636 	fs_reclaim_release(GFP_KERNEL);
3637 
3638 	if (val & DROP_RCU)
3639 		rcu_barrier();
3640 
3641 	if (val & DROP_FREED)
3642 		i915_gem_drain_freed_objects(i915);
3643 
3644 	return 0;
3645 }
3646 
3647 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3648 			i915_drop_caches_get, i915_drop_caches_set,
3649 			"0x%08llx\n");
3650 
3651 static int
3652 i915_cache_sharing_get(void *data, u64 *val)
3653 {
3654 	struct drm_i915_private *dev_priv = data;
3655 	intel_wakeref_t wakeref;
3656 	u32 snpcr = 0;
3657 
3658 	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3659 		return -ENODEV;
3660 
3661 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
3662 		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3663 
3664 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3665 
3666 	return 0;
3667 }
3668 
3669 static int
3670 i915_cache_sharing_set(void *data, u64 val)
3671 {
3672 	struct drm_i915_private *dev_priv = data;
3673 	intel_wakeref_t wakeref;
3674 
3675 	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3676 		return -ENODEV;
3677 
3678 	if (val > 3)
3679 		return -EINVAL;
3680 
3681 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
3682 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3683 		u32 snpcr;
3684 
3685 		/* Update the cache sharing policy here as well */
3686 		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3687 		snpcr &= ~GEN6_MBC_SNPCR_MASK;
3688 		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3689 		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3690 	}
3691 
3692 	return 0;
3693 }
3694 
3695 static void
3696 intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
3697 			  u8 *to_mask)
3698 {
3699 	int offset = slice * sseu->ss_stride;
3700 
3701 	memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
3702 }
3703 
3704 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3705 			i915_cache_sharing_get, i915_cache_sharing_set,
3706 			"%llu\n");
3707 
3708 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
3709 					  struct sseu_dev_info *sseu)
3710 {
3711 #define SS_MAX 2
3712 	const int ss_max = SS_MAX;
3713 	u32 sig1[SS_MAX], sig2[SS_MAX];
3714 	int ss;
3715 
3716 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
3717 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
3718 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
3719 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
3720 
3721 	for (ss = 0; ss < ss_max; ss++) {
3722 		unsigned int eu_cnt;
3723 
3724 		if (sig1[ss] & CHV_SS_PG_ENABLE)
3725 			/* skip disabled subslice */
3726 			continue;
3727 
3728 		sseu->slice_mask = BIT(0);
3729 		sseu->subslice_mask[0] |= BIT(ss);
3730 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
3731 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
3732 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
3733 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
3734 		sseu->eu_total += eu_cnt;
3735 		sseu->eu_per_subslice = max_t(unsigned int,
3736 					      sseu->eu_per_subslice, eu_cnt);
3737 	}
3738 #undef SS_MAX
3739 }
3740 
3741 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
3742 				     struct sseu_dev_info *sseu)
3743 {
3744 #define SS_MAX 6
3745 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3746 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3747 	int s, ss;
3748 
3749 	for (s = 0; s < info->sseu.max_slices; s++) {
3750 		/*
3751 		 * FIXME: Valid SS Mask respects the spec and read
3752 		 * only valid bits for those registers, excluding reserved
3753 		 * although this seems wrong because it would leave many
3754 		 * subslices without ACK.
3755 		 */
3756 		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
3757 			GEN10_PGCTL_VALID_SS_MASK(s);
3758 		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
3759 		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
3760 	}
3761 
3762 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3763 		     GEN9_PGCTL_SSA_EU19_ACK |
3764 		     GEN9_PGCTL_SSA_EU210_ACK |
3765 		     GEN9_PGCTL_SSA_EU311_ACK;
3766 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3767 		     GEN9_PGCTL_SSB_EU19_ACK |
3768 		     GEN9_PGCTL_SSB_EU210_ACK |
3769 		     GEN9_PGCTL_SSB_EU311_ACK;
3770 
3771 	for (s = 0; s < info->sseu.max_slices; s++) {
3772 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3773 			/* skip disabled slice */
3774 			continue;
3775 
3776 		sseu->slice_mask |= BIT(s);
3777 		intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);
3778 
3779 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3780 			unsigned int eu_cnt;
3781 
3782 			if (info->sseu.has_subslice_pg &&
3783 			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3784 				/* skip disabled subslice */
3785 				continue;
3786 
3787 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
3788 					       eu_mask[ss % 2]);
3789 			sseu->eu_total += eu_cnt;
3790 			sseu->eu_per_subslice = max_t(unsigned int,
3791 						      sseu->eu_per_subslice,
3792 						      eu_cnt);
3793 		}
3794 	}
3795 #undef SS_MAX
3796 }
3797 
3798 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
3799 				    struct sseu_dev_info *sseu)
3800 {
3801 #define SS_MAX 3
3802 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3803 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3804 	int s, ss;
3805 
3806 	for (s = 0; s < info->sseu.max_slices; s++) {
3807 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
3808 		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
3809 		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
3810 	}
3811 
3812 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3813 		     GEN9_PGCTL_SSA_EU19_ACK |
3814 		     GEN9_PGCTL_SSA_EU210_ACK |
3815 		     GEN9_PGCTL_SSA_EU311_ACK;
3816 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3817 		     GEN9_PGCTL_SSB_EU19_ACK |
3818 		     GEN9_PGCTL_SSB_EU210_ACK |
3819 		     GEN9_PGCTL_SSB_EU311_ACK;
3820 
3821 	for (s = 0; s < info->sseu.max_slices; s++) {
3822 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3823 			/* skip disabled slice */
3824 			continue;
3825 
3826 		sseu->slice_mask |= BIT(s);
3827 
3828 		if (IS_GEN9_BC(dev_priv))
3829 			intel_sseu_copy_subslices(&info->sseu, s,
3830 						  sseu->subslice_mask);
3831 
3832 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3833 			unsigned int eu_cnt;
3834 			u8 ss_idx = s * info->sseu.ss_stride +
3835 				    ss / BITS_PER_BYTE;
3836 
3837 			if (IS_GEN9_LP(dev_priv)) {
3838 				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3839 					/* skip disabled subslice */
3840 					continue;
3841 
3842 				sseu->subslice_mask[ss_idx] |=
3843 					BIT(ss % BITS_PER_BYTE);
3844 			}
3845 
3846 			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
3847 					       eu_mask[ss%2]);
3848 			sseu->eu_total += eu_cnt;
3849 			sseu->eu_per_subslice = max_t(unsigned int,
3850 						      sseu->eu_per_subslice,
3851 						      eu_cnt);
3852 		}
3853 	}
3854 #undef SS_MAX
3855 }
3856 
3857 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
3858 					 struct sseu_dev_info *sseu)
3859 {
3860 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3861 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
3862 	int s;
3863 
3864 	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
3865 
3866 	if (sseu->slice_mask) {
3867 		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
3868 		for (s = 0; s < fls(sseu->slice_mask); s++)
3869 			intel_sseu_copy_subslices(&info->sseu, s,
3870 						  sseu->subslice_mask);
3871 		sseu->eu_total = sseu->eu_per_subslice *
3872 				 intel_sseu_subslice_total(sseu);
3873 
3874 		/* subtract fused off EU(s) from enabled slice(s) */
3875 		for (s = 0; s < fls(sseu->slice_mask); s++) {
3876 			u8 subslice_7eu = info->sseu.subslice_7eu[s];
3877 
3878 			sseu->eu_total -= hweight8(subslice_7eu);
3879 		}
3880 	}
3881 }
3882 
3883 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
3884 				 const struct sseu_dev_info *sseu)
3885 {
3886 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3887 	const char *type = is_available_info ? "Available" : "Enabled";
3888 	int s;
3889 
3890 	seq_printf(m, "  %s Slice Mask: %04x\n", type,
3891 		   sseu->slice_mask);
3892 	seq_printf(m, "  %s Slice Total: %u\n", type,
3893 		   hweight8(sseu->slice_mask));
3894 	seq_printf(m, "  %s Subslice Total: %u\n", type,
3895 		   intel_sseu_subslice_total(sseu));
3896 	for (s = 0; s < fls(sseu->slice_mask); s++) {
3897 		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
3898 			   s, intel_sseu_subslices_per_slice(sseu, s));
3899 	}
3900 	seq_printf(m, "  %s EU Total: %u\n", type,
3901 		   sseu->eu_total);
3902 	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
3903 		   sseu->eu_per_subslice);
3904 
3905 	if (!is_available_info)
3906 		return;
3907 
3908 	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
3909 	if (HAS_POOLED_EU(dev_priv))
3910 		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
3911 
3912 	seq_printf(m, "  Has Slice Power Gating: %s\n",
3913 		   yesno(sseu->has_slice_pg));
3914 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
3915 		   yesno(sseu->has_subslice_pg));
3916 	seq_printf(m, "  Has EU Power Gating: %s\n",
3917 		   yesno(sseu->has_eu_pg));
3918 }
3919 
3920 static int i915_sseu_status(struct seq_file *m, void *unused)
3921 {
3922 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3923 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3924 	struct sseu_dev_info sseu;
3925 	intel_wakeref_t wakeref;
3926 
3927 	if (INTEL_GEN(dev_priv) < 8)
3928 		return -ENODEV;
3929 
3930 	seq_puts(m, "SSEU Device Info\n");
3931 	i915_print_sseu_info(m, true, &info->sseu);
3932 
3933 	seq_puts(m, "SSEU Device Status\n");
3934 	memset(&sseu, 0, sizeof(sseu));
3935 	intel_sseu_set_info(&sseu, info->sseu.max_slices,
3936 			    info->sseu.max_subslices,
3937 			    info->sseu.max_eus_per_subslice);
3938 
3939 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3940 		if (IS_CHERRYVIEW(dev_priv))
3941 			cherryview_sseu_device_status(dev_priv, &sseu);
3942 		else if (IS_BROADWELL(dev_priv))
3943 			broadwell_sseu_device_status(dev_priv, &sseu);
3944 		else if (IS_GEN(dev_priv, 9))
3945 			gen9_sseu_device_status(dev_priv, &sseu);
3946 		else if (INTEL_GEN(dev_priv) >= 10)
3947 			gen10_sseu_device_status(dev_priv, &sseu);
3948 	}
3949 
3950 	i915_print_sseu_info(m, false, &sseu);
3951 
3952 	return 0;
3953 }
3954 
3955 static int i915_forcewake_open(struct inode *inode, struct file *file)
3956 {
3957 	struct drm_i915_private *i915 = inode->i_private;
3958 	struct intel_gt *gt = &i915->gt;
3959 
3960 	atomic_inc(&gt->user_wakeref);
3961 	intel_gt_pm_get(gt);
3962 	if (INTEL_GEN(i915) >= 6)
3963 		intel_uncore_forcewake_user_get(gt->uncore);
3964 
3965 	return 0;
3966 }
3967 
3968 static int i915_forcewake_release(struct inode *inode, struct file *file)
3969 {
3970 	struct drm_i915_private *i915 = inode->i_private;
3971 	struct intel_gt *gt = &i915->gt;
3972 
3973 	if (INTEL_GEN(i915) >= 6)
3974 		intel_uncore_forcewake_user_put(&i915->uncore);
3975 	intel_gt_pm_put(gt);
3976 	atomic_dec(&gt->user_wakeref);
3977 
3978 	return 0;
3979 }
3980 
3981 static const struct file_operations i915_forcewake_fops = {
3982 	.owner = THIS_MODULE,
3983 	.open = i915_forcewake_open,
3984 	.release = i915_forcewake_release,
3985 };
3986 
3987 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
3988 {
3989 	struct drm_i915_private *dev_priv = m->private;
3990 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
3991 
3992 	/* Synchronize with everything first in case there's been an HPD
3993 	 * storm, but we haven't finished handling it in the kernel yet
3994 	 */
3995 	intel_synchronize_irq(dev_priv);
3996 	flush_work(&dev_priv->hotplug.dig_port_work);
3997 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
3998 
3999 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4000 	seq_printf(m, "Detected: %s\n",
4001 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4002 
4003 	return 0;
4004 }
4005 
4006 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4007 					const char __user *ubuf, size_t len,
4008 					loff_t *offp)
4009 {
4010 	struct seq_file *m = file->private_data;
4011 	struct drm_i915_private *dev_priv = m->private;
4012 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4013 	unsigned int new_threshold;
4014 	int i;
4015 	char *newline;
4016 	char tmp[16];
4017 
4018 	if (len >= sizeof(tmp))
4019 		return -EINVAL;
4020 
4021 	if (copy_from_user(tmp, ubuf, len))
4022 		return -EFAULT;
4023 
4024 	tmp[len] = '\0';
4025 
4026 	/* Strip newline, if any */
4027 	newline = strchr(tmp, '\n');
4028 	if (newline)
4029 		*newline = '\0';
4030 
4031 	if (strcmp(tmp, "reset") == 0)
4032 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4033 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4034 		return -EINVAL;
4035 
4036 	if (new_threshold > 0)
4037 		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4038 			      new_threshold);
4039 	else
4040 		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4041 
4042 	spin_lock_irq(&dev_priv->irq_lock);
4043 	hotplug->hpd_storm_threshold = new_threshold;
4044 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4045 	for_each_hpd_pin(i)
4046 		hotplug->stats[i].count = 0;
4047 	spin_unlock_irq(&dev_priv->irq_lock);
4048 
4049 	/* Re-enable hpd immediately if we were in an irq storm */
4050 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4051 
4052 	return len;
4053 }
4054 
4055 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4056 {
4057 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4058 }
4059 
4060 static const struct file_operations i915_hpd_storm_ctl_fops = {
4061 	.owner = THIS_MODULE,
4062 	.open = i915_hpd_storm_ctl_open,
4063 	.read = seq_read,
4064 	.llseek = seq_lseek,
4065 	.release = single_release,
4066 	.write = i915_hpd_storm_ctl_write
4067 };
4068 
4069 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4070 {
4071 	struct drm_i915_private *dev_priv = m->private;
4072 
4073 	seq_printf(m, "Enabled: %s\n",
4074 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4075 
4076 	return 0;
4077 }
4078 
4079 static int
4080 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4081 {
4082 	return single_open(file, i915_hpd_short_storm_ctl_show,
4083 			   inode->i_private);
4084 }
4085 
4086 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4087 					      const char __user *ubuf,
4088 					      size_t len, loff_t *offp)
4089 {
4090 	struct seq_file *m = file->private_data;
4091 	struct drm_i915_private *dev_priv = m->private;
4092 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4093 	char *newline;
4094 	char tmp[16];
4095 	int i;
4096 	bool new_state;
4097 
4098 	if (len >= sizeof(tmp))
4099 		return -EINVAL;
4100 
4101 	if (copy_from_user(tmp, ubuf, len))
4102 		return -EFAULT;
4103 
4104 	tmp[len] = '\0';
4105 
4106 	/* Strip newline, if any */
4107 	newline = strchr(tmp, '\n');
4108 	if (newline)
4109 		*newline = '\0';
4110 
4111 	/* Reset to the "default" state for this system */
4112 	if (strcmp(tmp, "reset") == 0)
4113 		new_state = !HAS_DP_MST(dev_priv);
4114 	else if (kstrtobool(tmp, &new_state) != 0)
4115 		return -EINVAL;
4116 
4117 	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4118 		      new_state ? "En" : "Dis");
4119 
4120 	spin_lock_irq(&dev_priv->irq_lock);
4121 	hotplug->hpd_short_storm_enabled = new_state;
4122 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4123 	for_each_hpd_pin(i)
4124 		hotplug->stats[i].count = 0;
4125 	spin_unlock_irq(&dev_priv->irq_lock);
4126 
4127 	/* Re-enable hpd immediately if we were in an irq storm */
4128 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4129 
4130 	return len;
4131 }
4132 
4133 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4134 	.owner = THIS_MODULE,
4135 	.open = i915_hpd_short_storm_ctl_open,
4136 	.read = seq_read,
4137 	.llseek = seq_lseek,
4138 	.release = single_release,
4139 	.write = i915_hpd_short_storm_ctl_write,
4140 };
4141 
4142 static int i915_drrs_ctl_set(void *data, u64 val)
4143 {
4144 	struct drm_i915_private *dev_priv = data;
4145 	struct drm_device *dev = &dev_priv->drm;
4146 	struct intel_crtc *crtc;
4147 
4148 	if (INTEL_GEN(dev_priv) < 7)
4149 		return -ENODEV;
4150 
4151 	for_each_intel_crtc(dev, crtc) {
4152 		struct drm_connector_list_iter conn_iter;
4153 		struct intel_crtc_state *crtc_state;
4154 		struct drm_connector *connector;
4155 		struct drm_crtc_commit *commit;
4156 		int ret;
4157 
4158 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4159 		if (ret)
4160 			return ret;
4161 
4162 		crtc_state = to_intel_crtc_state(crtc->base.state);
4163 
4164 		if (!crtc_state->base.active ||
4165 		    !crtc_state->has_drrs)
4166 			goto out;
4167 
4168 		commit = crtc_state->base.commit;
4169 		if (commit) {
4170 			ret = wait_for_completion_interruptible(&commit->hw_done);
4171 			if (ret)
4172 				goto out;
4173 		}
4174 
4175 		drm_connector_list_iter_begin(dev, &conn_iter);
4176 		drm_for_each_connector_iter(connector, &conn_iter) {
4177 			struct intel_encoder *encoder;
4178 			struct intel_dp *intel_dp;
4179 
4180 			if (!(crtc_state->base.connector_mask &
4181 			      drm_connector_mask(connector)))
4182 				continue;
4183 
4184 			encoder = intel_attached_encoder(connector);
4185 			if (encoder->type != INTEL_OUTPUT_EDP)
4186 				continue;
4187 
4188 			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4189 						val ? "en" : "dis", val);
4190 
4191 			intel_dp = enc_to_intel_dp(&encoder->base);
4192 			if (val)
4193 				intel_edp_drrs_enable(intel_dp,
4194 						      crtc_state);
4195 			else
4196 				intel_edp_drrs_disable(intel_dp,
4197 						       crtc_state);
4198 		}
4199 		drm_connector_list_iter_end(&conn_iter);
4200 
4201 out:
4202 		drm_modeset_unlock(&crtc->base.mutex);
4203 		if (ret)
4204 			return ret;
4205 	}
4206 
4207 	return 0;
4208 }
4209 
4210 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4211 
4212 static ssize_t
4213 i915_fifo_underrun_reset_write(struct file *filp,
4214 			       const char __user *ubuf,
4215 			       size_t cnt, loff_t *ppos)
4216 {
4217 	struct drm_i915_private *dev_priv = filp->private_data;
4218 	struct intel_crtc *intel_crtc;
4219 	struct drm_device *dev = &dev_priv->drm;
4220 	int ret;
4221 	bool reset;
4222 
4223 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
4224 	if (ret)
4225 		return ret;
4226 
4227 	if (!reset)
4228 		return cnt;
4229 
4230 	for_each_intel_crtc(dev, intel_crtc) {
4231 		struct drm_crtc_commit *commit;
4232 		struct intel_crtc_state *crtc_state;
4233 
4234 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4235 		if (ret)
4236 			return ret;
4237 
4238 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4239 		commit = crtc_state->base.commit;
4240 		if (commit) {
4241 			ret = wait_for_completion_interruptible(&commit->hw_done);
4242 			if (!ret)
4243 				ret = wait_for_completion_interruptible(&commit->flip_done);
4244 		}
4245 
4246 		if (!ret && crtc_state->base.active) {
4247 			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4248 				      pipe_name(intel_crtc->pipe));
4249 
4250 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4251 		}
4252 
4253 		drm_modeset_unlock(&intel_crtc->base.mutex);
4254 
4255 		if (ret)
4256 			return ret;
4257 	}
4258 
4259 	ret = intel_fbc_reset_underrun(dev_priv);
4260 	if (ret)
4261 		return ret;
4262 
4263 	return cnt;
4264 }
4265 
4266 static const struct file_operations i915_fifo_underrun_reset_ops = {
4267 	.owner = THIS_MODULE,
4268 	.open = simple_open,
4269 	.write = i915_fifo_underrun_reset_write,
4270 	.llseek = default_llseek,
4271 };
4272 
4273 static const struct drm_info_list i915_debugfs_list[] = {
4274 	{"i915_capabilities", i915_capabilities, 0},
4275 	{"i915_gem_objects", i915_gem_object_info, 0},
4276 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4277 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4278 	{"i915_guc_info", i915_guc_info, 0},
4279 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4280 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4281 	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4282 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4283 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4284 	{"i915_frequency_info", i915_frequency_info, 0},
4285 	{"i915_drpc_info", i915_drpc_info, 0},
4286 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4287 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4288 	{"i915_fbc_status", i915_fbc_status, 0},
4289 	{"i915_ips_status", i915_ips_status, 0},
4290 	{"i915_sr_status", i915_sr_status, 0},
4291 	{"i915_opregion", i915_opregion, 0},
4292 	{"i915_vbt", i915_vbt, 0},
4293 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4294 	{"i915_context_status", i915_context_status, 0},
4295 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4296 	{"i915_swizzle_info", i915_swizzle_info, 0},
4297 	{"i915_llc", i915_llc, 0},
4298 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4299 	{"i915_energy_uJ", i915_energy_uJ, 0},
4300 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4301 	{"i915_power_domain_info", i915_power_domain_info, 0},
4302 	{"i915_dmc_info", i915_dmc_info, 0},
4303 	{"i915_display_info", i915_display_info, 0},
4304 	{"i915_engine_info", i915_engine_info, 0},
4305 	{"i915_rcs_topology", i915_rcs_topology, 0},
4306 	{"i915_shrinker_info", i915_shrinker_info, 0},
4307 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4308 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4309 	{"i915_wa_registers", i915_wa_registers, 0},
4310 	{"i915_ddb_info", i915_ddb_info, 0},
4311 	{"i915_sseu_status", i915_sseu_status, 0},
4312 	{"i915_drrs_status", i915_drrs_status, 0},
4313 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4314 };
4315 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4316 
4317 static const struct i915_debugfs_files {
4318 	const char *name;
4319 	const struct file_operations *fops;
4320 } i915_debugfs_files[] = {
4321 	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
4322 	{"i915_wedged", &i915_wedged_fops},
4323 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4324 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4325 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4326 	{"i915_error_state", &i915_error_state_fops},
4327 	{"i915_gpu_info", &i915_gpu_info_fops},
4328 #endif
4329 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4330 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4331 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4332 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4333 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4334 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4335 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4336 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4337 	{"i915_guc_log_level", &i915_guc_log_level_fops},
4338 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
4339 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4340 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
4341 	{"i915_ipc_status", &i915_ipc_status_fops},
4342 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
4343 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4344 };
4345 
4346 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4347 {
4348 	struct drm_minor *minor = dev_priv->drm.primary;
4349 	int i;
4350 
4351 	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
4352 			    to_i915(minor->dev), &i915_forcewake_fops);
4353 
4354 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4355 		debugfs_create_file(i915_debugfs_files[i].name,
4356 				    S_IRUGO | S_IWUSR,
4357 				    minor->debugfs_root,
4358 				    to_i915(minor->dev),
4359 				    i915_debugfs_files[i].fops);
4360 	}
4361 
4362 	return drm_debugfs_create_files(i915_debugfs_list,
4363 					I915_DEBUGFS_ENTRIES,
4364 					minor->debugfs_root, minor);
4365 }
4366 
4367 struct dpcd_block {
4368 	/* DPCD dump start address. */
4369 	unsigned int offset;
4370 	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4371 	unsigned int end;
4372 	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4373 	size_t size;
4374 	/* Only valid for eDP. */
4375 	bool edp;
4376 };
4377 
4378 static const struct dpcd_block i915_dpcd_debug[] = {
4379 	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4380 	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4381 	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4382 	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4383 	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4384 	{ .offset = DP_SET_POWER },
4385 	{ .offset = DP_EDP_DPCD_REV },
4386 	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4387 	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4388 	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4389 };
4390 
4391 static int i915_dpcd_show(struct seq_file *m, void *data)
4392 {
4393 	struct drm_connector *connector = m->private;
4394 	struct intel_dp *intel_dp =
4395 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4396 	u8 buf[16];
4397 	ssize_t err;
4398 	int i;
4399 
4400 	if (connector->status != connector_status_connected)
4401 		return -ENODEV;
4402 
4403 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4404 		const struct dpcd_block *b = &i915_dpcd_debug[i];
4405 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4406 
4407 		if (b->edp &&
4408 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4409 			continue;
4410 
4411 		/* low tech for now */
4412 		if (WARN_ON(size > sizeof(buf)))
4413 			continue;
4414 
4415 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4416 		if (err < 0)
4417 			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4418 		else
4419 			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4420 	}
4421 
4422 	return 0;
4423 }
4424 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4425 
4426 static int i915_panel_show(struct seq_file *m, void *data)
4427 {
4428 	struct drm_connector *connector = m->private;
4429 	struct intel_dp *intel_dp =
4430 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4431 
4432 	if (connector->status != connector_status_connected)
4433 		return -ENODEV;
4434 
4435 	seq_printf(m, "Panel power up delay: %d\n",
4436 		   intel_dp->panel_power_up_delay);
4437 	seq_printf(m, "Panel power down delay: %d\n",
4438 		   intel_dp->panel_power_down_delay);
4439 	seq_printf(m, "Backlight on delay: %d\n",
4440 		   intel_dp->backlight_on_delay);
4441 	seq_printf(m, "Backlight off delay: %d\n",
4442 		   intel_dp->backlight_off_delay);
4443 
4444 	return 0;
4445 }
4446 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4447 
4448 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4449 {
4450 	struct drm_connector *connector = m->private;
4451 	struct intel_connector *intel_connector = to_intel_connector(connector);
4452 
4453 	if (connector->status != connector_status_connected)
4454 		return -ENODEV;
4455 
4456 	/* HDCP is supported by connector */
4457 	if (!intel_connector->hdcp.shim)
4458 		return -EINVAL;
4459 
4460 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
4461 		   connector->base.id);
4462 	intel_hdcp_info(m, intel_connector);
4463 
4464 	return 0;
4465 }
4466 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4467 
4468 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
4469 {
4470 	struct drm_connector *connector = m->private;
4471 	struct drm_device *dev = connector->dev;
4472 	struct drm_crtc *crtc;
4473 	struct intel_dp *intel_dp;
4474 	struct drm_modeset_acquire_ctx ctx;
4475 	struct intel_crtc_state *crtc_state = NULL;
4476 	int ret = 0;
4477 	bool try_again = false;
4478 
4479 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
4480 
4481 	do {
4482 		try_again = false;
4483 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4484 				       &ctx);
4485 		if (ret) {
4486 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
4487 				try_again = true;
4488 				continue;
4489 			}
4490 			break;
4491 		}
4492 		crtc = connector->state->crtc;
4493 		if (connector->status != connector_status_connected || !crtc) {
4494 			ret = -ENODEV;
4495 			break;
4496 		}
4497 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
4498 		if (ret == -EDEADLK) {
4499 			ret = drm_modeset_backoff(&ctx);
4500 			if (!ret) {
4501 				try_again = true;
4502 				continue;
4503 			}
4504 			break;
4505 		} else if (ret) {
4506 			break;
4507 		}
4508 		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4509 		crtc_state = to_intel_crtc_state(crtc->state);
4510 		seq_printf(m, "DSC_Enabled: %s\n",
4511 			   yesno(crtc_state->dsc.compression_enable));
4512 		seq_printf(m, "DSC_Sink_Support: %s\n",
4513 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
4514 		seq_printf(m, "Force_DSC_Enable: %s\n",
4515 			   yesno(intel_dp->force_dsc_en));
4516 		if (!intel_dp_is_edp(intel_dp))
4517 			seq_printf(m, "FEC_Sink_Support: %s\n",
4518 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
4519 	} while (try_again);
4520 
4521 	drm_modeset_drop_locks(&ctx);
4522 	drm_modeset_acquire_fini(&ctx);
4523 
4524 	return ret;
4525 }
4526 
4527 static ssize_t i915_dsc_fec_support_write(struct file *file,
4528 					  const char __user *ubuf,
4529 					  size_t len, loff_t *offp)
4530 {
4531 	bool dsc_enable = false;
4532 	int ret;
4533 	struct drm_connector *connector =
4534 		((struct seq_file *)file->private_data)->private;
4535 	struct intel_encoder *encoder = intel_attached_encoder(connector);
4536 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4537 
4538 	if (len == 0)
4539 		return 0;
4540 
4541 	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4542 			 len);
4543 
4544 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4545 	if (ret < 0)
4546 		return ret;
4547 
4548 	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4549 			 (dsc_enable) ? "true" : "false");
4550 	intel_dp->force_dsc_en = dsc_enable;
4551 
4552 	*offp += len;
4553 	return len;
4554 }
4555 
4556 static int i915_dsc_fec_support_open(struct inode *inode,
4557 				     struct file *file)
4558 {
4559 	return single_open(file, i915_dsc_fec_support_show,
4560 			   inode->i_private);
4561 }
4562 
4563 static const struct file_operations i915_dsc_fec_support_fops = {
4564 	.owner = THIS_MODULE,
4565 	.open = i915_dsc_fec_support_open,
4566 	.read = seq_read,
4567 	.llseek = seq_lseek,
4568 	.release = single_release,
4569 	.write = i915_dsc_fec_support_write
4570 };
4571 
4572 /**
4573  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4574  * @connector: pointer to a registered drm_connector
4575  *
4576  * Cleanup will be done by drm_connector_unregister() through a call to
4577  * drm_debugfs_connector_remove().
4578  *
4579  * Returns 0 on success, negative error codes on error.
4580  */
4581 int i915_debugfs_connector_add(struct drm_connector *connector)
4582 {
4583 	struct dentry *root = connector->debugfs_entry;
4584 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
4585 
4586 	/* The connector must have been registered beforehands. */
4587 	if (!root)
4588 		return -ENODEV;
4589 
4590 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4591 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4592 		debugfs_create_file("i915_dpcd", S_IRUGO, root,
4593 				    connector, &i915_dpcd_fops);
4594 
4595 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4596 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4597 				    connector, &i915_panel_fops);
4598 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4599 				    connector, &i915_psr_sink_status_fops);
4600 	}
4601 
4602 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4603 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4604 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4605 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4606 				    connector, &i915_hdcp_sink_capability_fops);
4607 	}
4608 
4609 	if (INTEL_GEN(dev_priv) >= 10 &&
4610 	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4611 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4612 		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4613 				    connector, &i915_dsc_fec_support_fops);
4614 
4615 	return 0;
4616 }
4617