1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28 
29 #include <linux/sched/mm.h>
30 #include <linux/sort.h>
31 
32 #include <drm/drm_debugfs.h>
33 #include <drm/drm_fourcc.h>
34 
35 #include "display/intel_dp.h"
36 #include "display/intel_fbc.h"
37 #include "display/intel_hdcp.h"
38 #include "display/intel_hdmi.h"
39 #include "display/intel_psr.h"
40 
41 #include "gem/i915_gem_context.h"
42 #include "gt/intel_reset.h"
43 #include "gt/uc/intel_guc_submission.h"
44 
45 #include "i915_debugfs.h"
46 #include "i915_irq.h"
47 #include "intel_csr.h"
48 #include "intel_drv.h"
49 #include "intel_pm.h"
50 #include "intel_sideband.h"
51 
52 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
53 {
54 	return to_i915(node->minor->dev);
55 }
56 
57 static int i915_capabilities(struct seq_file *m, void *data)
58 {
59 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
60 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
61 	struct drm_printer p = drm_seq_file_printer(m);
62 
63 	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
64 	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
65 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
66 
67 	intel_device_info_dump_flags(info, &p);
68 	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
69 	intel_driver_caps_print(&dev_priv->caps, &p);
70 
71 	kernel_param_lock(THIS_MODULE);
72 	i915_params_dump(&i915_modparams, &p);
73 	kernel_param_unlock(THIS_MODULE);
74 
75 	return 0;
76 }
77 
78 static char get_pin_flag(struct drm_i915_gem_object *obj)
79 {
80 	return obj->pin_global ? 'p' : ' ';
81 }
82 
83 static char get_tiling_flag(struct drm_i915_gem_object *obj)
84 {
85 	switch (i915_gem_object_get_tiling(obj)) {
86 	default:
87 	case I915_TILING_NONE: return ' ';
88 	case I915_TILING_X: return 'X';
89 	case I915_TILING_Y: return 'Y';
90 	}
91 }
92 
93 static char get_global_flag(struct drm_i915_gem_object *obj)
94 {
95 	return obj->userfault_count ? 'g' : ' ';
96 }
97 
98 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
99 {
100 	return obj->mm.mapping ? 'M' : ' ';
101 }
102 
103 static const char *
104 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
105 {
106 	size_t x = 0;
107 
108 	switch (page_sizes) {
109 	case 0:
110 		return "";
111 	case I915_GTT_PAGE_SIZE_4K:
112 		return "4K";
113 	case I915_GTT_PAGE_SIZE_64K:
114 		return "64K";
115 	case I915_GTT_PAGE_SIZE_2M:
116 		return "2M";
117 	default:
118 		if (!buf)
119 			return "M";
120 
121 		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
122 			x += snprintf(buf + x, len - x, "2M, ");
123 		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
124 			x += snprintf(buf + x, len - x, "64K, ");
125 		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
126 			x += snprintf(buf + x, len - x, "4K, ");
127 		buf[x-2] = '\0';
128 
129 		return buf;
130 	}
131 }
132 
133 static void
134 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
135 {
136 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
137 	struct intel_engine_cs *engine;
138 	struct i915_vma *vma;
139 	unsigned int frontbuffer_bits;
140 	int pin_count = 0;
141 
142 	seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
143 		   &obj->base,
144 		   get_pin_flag(obj),
145 		   get_tiling_flag(obj),
146 		   get_global_flag(obj),
147 		   get_pin_mapped_flag(obj),
148 		   obj->base.size / 1024,
149 		   obj->read_domains,
150 		   obj->write_domain,
151 		   i915_cache_level_str(dev_priv, obj->cache_level),
152 		   obj->mm.dirty ? " dirty" : "",
153 		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
154 	if (obj->base.name)
155 		seq_printf(m, " (name: %d)", obj->base.name);
156 
157 	spin_lock(&obj->vma.lock);
158 	list_for_each_entry(vma, &obj->vma.list, obj_link) {
159 		if (!drm_mm_node_allocated(&vma->node))
160 			continue;
161 
162 		spin_unlock(&obj->vma.lock);
163 
164 		if (i915_vma_is_pinned(vma))
165 			pin_count++;
166 
167 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
168 			   i915_vma_is_ggtt(vma) ? "g" : "pp",
169 			   vma->node.start, vma->node.size,
170 			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
171 		if (i915_vma_is_ggtt(vma)) {
172 			switch (vma->ggtt_view.type) {
173 			case I915_GGTT_VIEW_NORMAL:
174 				seq_puts(m, ", normal");
175 				break;
176 
177 			case I915_GGTT_VIEW_PARTIAL:
178 				seq_printf(m, ", partial [%08llx+%x]",
179 					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
180 					   vma->ggtt_view.partial.size << PAGE_SHIFT);
181 				break;
182 
183 			case I915_GGTT_VIEW_ROTATED:
184 				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
185 					   vma->ggtt_view.rotated.plane[0].width,
186 					   vma->ggtt_view.rotated.plane[0].height,
187 					   vma->ggtt_view.rotated.plane[0].stride,
188 					   vma->ggtt_view.rotated.plane[0].offset,
189 					   vma->ggtt_view.rotated.plane[1].width,
190 					   vma->ggtt_view.rotated.plane[1].height,
191 					   vma->ggtt_view.rotated.plane[1].stride,
192 					   vma->ggtt_view.rotated.plane[1].offset);
193 				break;
194 
195 			case I915_GGTT_VIEW_REMAPPED:
196 				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
197 					   vma->ggtt_view.remapped.plane[0].width,
198 					   vma->ggtt_view.remapped.plane[0].height,
199 					   vma->ggtt_view.remapped.plane[0].stride,
200 					   vma->ggtt_view.remapped.plane[0].offset,
201 					   vma->ggtt_view.remapped.plane[1].width,
202 					   vma->ggtt_view.remapped.plane[1].height,
203 					   vma->ggtt_view.remapped.plane[1].stride,
204 					   vma->ggtt_view.remapped.plane[1].offset);
205 				break;
206 
207 			default:
208 				MISSING_CASE(vma->ggtt_view.type);
209 				break;
210 			}
211 		}
212 		if (vma->fence)
213 			seq_printf(m, " , fence: %d%s",
214 				   vma->fence->id,
215 				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
216 		seq_puts(m, ")");
217 
218 		spin_lock(&obj->vma.lock);
219 	}
220 	spin_unlock(&obj->vma.lock);
221 
222 	seq_printf(m, " (pinned x %d)", pin_count);
223 	if (obj->stolen)
224 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
225 	if (obj->pin_global)
226 		seq_printf(m, " (global)");
227 
228 	engine = i915_gem_object_last_write_engine(obj);
229 	if (engine)
230 		seq_printf(m, " (%s)", engine->name);
231 
232 	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
233 	if (frontbuffer_bits)
234 		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
235 }
236 
237 struct file_stats {
238 	struct i915_address_space *vm;
239 	unsigned long count;
240 	u64 total, unbound;
241 	u64 global, shared;
242 	u64 active, inactive;
243 	u64 closed;
244 };
245 
246 static int per_file_stats(int id, void *ptr, void *data)
247 {
248 	struct drm_i915_gem_object *obj = ptr;
249 	struct file_stats *stats = data;
250 	struct i915_vma *vma;
251 
252 	lockdep_assert_held(&obj->base.dev->struct_mutex);
253 
254 	stats->count++;
255 	stats->total += obj->base.size;
256 	if (!atomic_read(&obj->bind_count))
257 		stats->unbound += obj->base.size;
258 	if (obj->base.name || obj->base.dma_buf)
259 		stats->shared += obj->base.size;
260 
261 	list_for_each_entry(vma, &obj->vma.list, obj_link) {
262 		if (!drm_mm_node_allocated(&vma->node))
263 			continue;
264 
265 		if (i915_vma_is_ggtt(vma)) {
266 			stats->global += vma->node.size;
267 		} else {
268 			if (vma->vm != stats->vm)
269 				continue;
270 		}
271 
272 		if (i915_vma_is_active(vma))
273 			stats->active += vma->node.size;
274 		else
275 			stats->inactive += vma->node.size;
276 
277 		if (i915_vma_is_closed(vma))
278 			stats->closed += vma->node.size;
279 	}
280 
281 	return 0;
282 }
283 
284 #define print_file_stats(m, name, stats) do { \
285 	if (stats.count) \
286 		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
287 			   name, \
288 			   stats.count, \
289 			   stats.total, \
290 			   stats.active, \
291 			   stats.inactive, \
292 			   stats.global, \
293 			   stats.shared, \
294 			   stats.unbound, \
295 			   stats.closed); \
296 } while (0)
297 
298 static void print_batch_pool_stats(struct seq_file *m,
299 				   struct drm_i915_private *dev_priv)
300 {
301 	struct drm_i915_gem_object *obj;
302 	struct intel_engine_cs *engine;
303 	struct file_stats stats = {};
304 	enum intel_engine_id id;
305 	int j;
306 
307 	for_each_engine(engine, dev_priv, id) {
308 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
309 			list_for_each_entry(obj,
310 					    &engine->batch_pool.cache_list[j],
311 					    batch_pool_link)
312 				per_file_stats(0, obj, &stats);
313 		}
314 	}
315 
316 	print_file_stats(m, "[k]batch pool", stats);
317 }
318 
319 static void print_context_stats(struct seq_file *m,
320 				struct drm_i915_private *i915)
321 {
322 	struct file_stats kstats = {};
323 	struct i915_gem_context *ctx;
324 
325 	list_for_each_entry(ctx, &i915->contexts.list, link) {
326 		struct i915_gem_engines_iter it;
327 		struct intel_context *ce;
328 
329 		for_each_gem_engine(ce,
330 				    i915_gem_context_lock_engines(ctx), it) {
331 			if (ce->state)
332 				per_file_stats(0, ce->state->obj, &kstats);
333 			if (ce->ring)
334 				per_file_stats(0, ce->ring->vma->obj, &kstats);
335 		}
336 		i915_gem_context_unlock_engines(ctx);
337 
338 		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
339 			struct file_stats stats = { .vm = ctx->vm, };
340 			struct drm_file *file = ctx->file_priv->file;
341 			struct task_struct *task;
342 			char name[80];
343 
344 			spin_lock(&file->table_lock);
345 			idr_for_each(&file->object_idr, per_file_stats, &stats);
346 			spin_unlock(&file->table_lock);
347 
348 			rcu_read_lock();
349 			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
350 			snprintf(name, sizeof(name), "%s",
351 				 task ? task->comm : "<unknown>");
352 			rcu_read_unlock();
353 
354 			print_file_stats(m, name, stats);
355 		}
356 	}
357 
358 	print_file_stats(m, "[k]contexts", kstats);
359 }
360 
361 static int i915_gem_object_info(struct seq_file *m, void *data)
362 {
363 	struct drm_i915_private *i915 = node_to_i915(m->private);
364 	int ret;
365 
366 	seq_printf(m, "%u shrinkable objects, %llu bytes\n",
367 		   i915->mm.shrink_count,
368 		   i915->mm.shrink_memory);
369 
370 	seq_putc(m, '\n');
371 
372 	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
373 	if (ret)
374 		return ret;
375 
376 	print_batch_pool_stats(m, i915);
377 	print_context_stats(m, i915);
378 	mutex_unlock(&i915->drm.struct_mutex);
379 
380 	return 0;
381 }
382 
383 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
384 {
385 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
386 	struct drm_device *dev = &dev_priv->drm;
387 	struct drm_i915_gem_object *obj;
388 	struct intel_engine_cs *engine;
389 	enum intel_engine_id id;
390 	int total = 0;
391 	int ret, j;
392 
393 	ret = mutex_lock_interruptible(&dev->struct_mutex);
394 	if (ret)
395 		return ret;
396 
397 	for_each_engine(engine, dev_priv, id) {
398 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
399 			int count;
400 
401 			count = 0;
402 			list_for_each_entry(obj,
403 					    &engine->batch_pool.cache_list[j],
404 					    batch_pool_link)
405 				count++;
406 			seq_printf(m, "%s cache[%d]: %d objects\n",
407 				   engine->name, j, count);
408 
409 			list_for_each_entry(obj,
410 					    &engine->batch_pool.cache_list[j],
411 					    batch_pool_link) {
412 				seq_puts(m, "   ");
413 				describe_obj(m, obj);
414 				seq_putc(m, '\n');
415 			}
416 
417 			total += count;
418 		}
419 	}
420 
421 	seq_printf(m, "total: %d\n", total);
422 
423 	mutex_unlock(&dev->struct_mutex);
424 
425 	return 0;
426 }
427 
428 static void gen8_display_interrupt_info(struct seq_file *m)
429 {
430 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
431 	int pipe;
432 
433 	for_each_pipe(dev_priv, pipe) {
434 		enum intel_display_power_domain power_domain;
435 		intel_wakeref_t wakeref;
436 
437 		power_domain = POWER_DOMAIN_PIPE(pipe);
438 		wakeref = intel_display_power_get_if_enabled(dev_priv,
439 							     power_domain);
440 		if (!wakeref) {
441 			seq_printf(m, "Pipe %c power disabled\n",
442 				   pipe_name(pipe));
443 			continue;
444 		}
445 		seq_printf(m, "Pipe %c IMR:\t%08x\n",
446 			   pipe_name(pipe),
447 			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
448 		seq_printf(m, "Pipe %c IIR:\t%08x\n",
449 			   pipe_name(pipe),
450 			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
451 		seq_printf(m, "Pipe %c IER:\t%08x\n",
452 			   pipe_name(pipe),
453 			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
454 
455 		intel_display_power_put(dev_priv, power_domain, wakeref);
456 	}
457 
458 	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
459 		   I915_READ(GEN8_DE_PORT_IMR));
460 	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
461 		   I915_READ(GEN8_DE_PORT_IIR));
462 	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
463 		   I915_READ(GEN8_DE_PORT_IER));
464 
465 	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
466 		   I915_READ(GEN8_DE_MISC_IMR));
467 	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
468 		   I915_READ(GEN8_DE_MISC_IIR));
469 	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
470 		   I915_READ(GEN8_DE_MISC_IER));
471 
472 	seq_printf(m, "PCU interrupt mask:\t%08x\n",
473 		   I915_READ(GEN8_PCU_IMR));
474 	seq_printf(m, "PCU interrupt identity:\t%08x\n",
475 		   I915_READ(GEN8_PCU_IIR));
476 	seq_printf(m, "PCU interrupt enable:\t%08x\n",
477 		   I915_READ(GEN8_PCU_IER));
478 }
479 
480 static int i915_interrupt_info(struct seq_file *m, void *data)
481 {
482 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
483 	struct intel_engine_cs *engine;
484 	enum intel_engine_id id;
485 	intel_wakeref_t wakeref;
486 	int i, pipe;
487 
488 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
489 
490 	if (IS_CHERRYVIEW(dev_priv)) {
491 		intel_wakeref_t pref;
492 
493 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
494 			   I915_READ(GEN8_MASTER_IRQ));
495 
496 		seq_printf(m, "Display IER:\t%08x\n",
497 			   I915_READ(VLV_IER));
498 		seq_printf(m, "Display IIR:\t%08x\n",
499 			   I915_READ(VLV_IIR));
500 		seq_printf(m, "Display IIR_RW:\t%08x\n",
501 			   I915_READ(VLV_IIR_RW));
502 		seq_printf(m, "Display IMR:\t%08x\n",
503 			   I915_READ(VLV_IMR));
504 		for_each_pipe(dev_priv, pipe) {
505 			enum intel_display_power_domain power_domain;
506 
507 			power_domain = POWER_DOMAIN_PIPE(pipe);
508 			pref = intel_display_power_get_if_enabled(dev_priv,
509 								  power_domain);
510 			if (!pref) {
511 				seq_printf(m, "Pipe %c power disabled\n",
512 					   pipe_name(pipe));
513 				continue;
514 			}
515 
516 			seq_printf(m, "Pipe %c stat:\t%08x\n",
517 				   pipe_name(pipe),
518 				   I915_READ(PIPESTAT(pipe)));
519 
520 			intel_display_power_put(dev_priv, power_domain, pref);
521 		}
522 
523 		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
524 		seq_printf(m, "Port hotplug:\t%08x\n",
525 			   I915_READ(PORT_HOTPLUG_EN));
526 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
527 			   I915_READ(VLV_DPFLIPSTAT));
528 		seq_printf(m, "DPINVGTT:\t%08x\n",
529 			   I915_READ(DPINVGTT));
530 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
531 
532 		for (i = 0; i < 4; i++) {
533 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
534 				   i, I915_READ(GEN8_GT_IMR(i)));
535 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
536 				   i, I915_READ(GEN8_GT_IIR(i)));
537 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
538 				   i, I915_READ(GEN8_GT_IER(i)));
539 		}
540 
541 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
542 			   I915_READ(GEN8_PCU_IMR));
543 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
544 			   I915_READ(GEN8_PCU_IIR));
545 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
546 			   I915_READ(GEN8_PCU_IER));
547 	} else if (INTEL_GEN(dev_priv) >= 11) {
548 		seq_printf(m, "Master Interrupt Control:  %08x\n",
549 			   I915_READ(GEN11_GFX_MSTR_IRQ));
550 
551 		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
552 			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
553 		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
554 			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
555 		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
556 			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
557 		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
558 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
559 		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
560 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
561 		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
562 			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
563 
564 		seq_printf(m, "Display Interrupt Control:\t%08x\n",
565 			   I915_READ(GEN11_DISPLAY_INT_CTL));
566 
567 		gen8_display_interrupt_info(m);
568 	} else if (INTEL_GEN(dev_priv) >= 8) {
569 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
570 			   I915_READ(GEN8_MASTER_IRQ));
571 
572 		for (i = 0; i < 4; i++) {
573 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
574 				   i, I915_READ(GEN8_GT_IMR(i)));
575 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
576 				   i, I915_READ(GEN8_GT_IIR(i)));
577 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
578 				   i, I915_READ(GEN8_GT_IER(i)));
579 		}
580 
581 		gen8_display_interrupt_info(m);
582 	} else if (IS_VALLEYVIEW(dev_priv)) {
583 		seq_printf(m, "Display IER:\t%08x\n",
584 			   I915_READ(VLV_IER));
585 		seq_printf(m, "Display IIR:\t%08x\n",
586 			   I915_READ(VLV_IIR));
587 		seq_printf(m, "Display IIR_RW:\t%08x\n",
588 			   I915_READ(VLV_IIR_RW));
589 		seq_printf(m, "Display IMR:\t%08x\n",
590 			   I915_READ(VLV_IMR));
591 		for_each_pipe(dev_priv, pipe) {
592 			enum intel_display_power_domain power_domain;
593 			intel_wakeref_t pref;
594 
595 			power_domain = POWER_DOMAIN_PIPE(pipe);
596 			pref = intel_display_power_get_if_enabled(dev_priv,
597 								  power_domain);
598 			if (!pref) {
599 				seq_printf(m, "Pipe %c power disabled\n",
600 					   pipe_name(pipe));
601 				continue;
602 			}
603 
604 			seq_printf(m, "Pipe %c stat:\t%08x\n",
605 				   pipe_name(pipe),
606 				   I915_READ(PIPESTAT(pipe)));
607 			intel_display_power_put(dev_priv, power_domain, pref);
608 		}
609 
610 		seq_printf(m, "Master IER:\t%08x\n",
611 			   I915_READ(VLV_MASTER_IER));
612 
613 		seq_printf(m, "Render IER:\t%08x\n",
614 			   I915_READ(GTIER));
615 		seq_printf(m, "Render IIR:\t%08x\n",
616 			   I915_READ(GTIIR));
617 		seq_printf(m, "Render IMR:\t%08x\n",
618 			   I915_READ(GTIMR));
619 
620 		seq_printf(m, "PM IER:\t\t%08x\n",
621 			   I915_READ(GEN6_PMIER));
622 		seq_printf(m, "PM IIR:\t\t%08x\n",
623 			   I915_READ(GEN6_PMIIR));
624 		seq_printf(m, "PM IMR:\t\t%08x\n",
625 			   I915_READ(GEN6_PMIMR));
626 
627 		seq_printf(m, "Port hotplug:\t%08x\n",
628 			   I915_READ(PORT_HOTPLUG_EN));
629 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
630 			   I915_READ(VLV_DPFLIPSTAT));
631 		seq_printf(m, "DPINVGTT:\t%08x\n",
632 			   I915_READ(DPINVGTT));
633 
634 	} else if (!HAS_PCH_SPLIT(dev_priv)) {
635 		seq_printf(m, "Interrupt enable:    %08x\n",
636 			   I915_READ(GEN2_IER));
637 		seq_printf(m, "Interrupt identity:  %08x\n",
638 			   I915_READ(GEN2_IIR));
639 		seq_printf(m, "Interrupt mask:      %08x\n",
640 			   I915_READ(GEN2_IMR));
641 		for_each_pipe(dev_priv, pipe)
642 			seq_printf(m, "Pipe %c stat:         %08x\n",
643 				   pipe_name(pipe),
644 				   I915_READ(PIPESTAT(pipe)));
645 	} else {
646 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
647 			   I915_READ(DEIER));
648 		seq_printf(m, "North Display Interrupt identity:	%08x\n",
649 			   I915_READ(DEIIR));
650 		seq_printf(m, "North Display Interrupt mask:		%08x\n",
651 			   I915_READ(DEIMR));
652 		seq_printf(m, "South Display Interrupt enable:		%08x\n",
653 			   I915_READ(SDEIER));
654 		seq_printf(m, "South Display Interrupt identity:	%08x\n",
655 			   I915_READ(SDEIIR));
656 		seq_printf(m, "South Display Interrupt mask:		%08x\n",
657 			   I915_READ(SDEIMR));
658 		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
659 			   I915_READ(GTIER));
660 		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
661 			   I915_READ(GTIIR));
662 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
663 			   I915_READ(GTIMR));
664 	}
665 
666 	if (INTEL_GEN(dev_priv) >= 11) {
667 		seq_printf(m, "RCS Intr Mask:\t %08x\n",
668 			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
669 		seq_printf(m, "BCS Intr Mask:\t %08x\n",
670 			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
671 		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
672 			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
673 		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
674 			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
675 		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
676 			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
677 		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
678 			   I915_READ(GEN11_GUC_SG_INTR_MASK));
679 		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
680 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
681 		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
682 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
683 		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
684 			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
685 
686 	} else if (INTEL_GEN(dev_priv) >= 6) {
687 		for_each_engine(engine, dev_priv, id) {
688 			seq_printf(m,
689 				   "Graphics Interrupt mask (%s):	%08x\n",
690 				   engine->name, ENGINE_READ(engine, RING_IMR));
691 		}
692 	}
693 
694 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
695 
696 	return 0;
697 }
698 
699 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
700 {
701 	struct drm_i915_private *i915 = node_to_i915(m->private);
702 	unsigned int i;
703 
704 	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
705 
706 	rcu_read_lock();
707 	for (i = 0; i < i915->ggtt.num_fences; i++) {
708 		struct i915_vma *vma = i915->ggtt.fence_regs[i].vma;
709 
710 		seq_printf(m, "Fence %d, pin count = %d, object = ",
711 			   i, i915->ggtt.fence_regs[i].pin_count);
712 		if (!vma)
713 			seq_puts(m, "unused");
714 		else
715 			describe_obj(m, vma->obj);
716 		seq_putc(m, '\n');
717 	}
718 	rcu_read_unlock();
719 
720 	return 0;
721 }
722 
723 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
724 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
725 			      size_t count, loff_t *pos)
726 {
727 	struct i915_gpu_state *error;
728 	ssize_t ret;
729 	void *buf;
730 
731 	error = file->private_data;
732 	if (!error)
733 		return 0;
734 
735 	/* Bounce buffer required because of kernfs __user API convenience. */
736 	buf = kmalloc(count, GFP_KERNEL);
737 	if (!buf)
738 		return -ENOMEM;
739 
740 	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
741 	if (ret <= 0)
742 		goto out;
743 
744 	if (!copy_to_user(ubuf, buf, ret))
745 		*pos += ret;
746 	else
747 		ret = -EFAULT;
748 
749 out:
750 	kfree(buf);
751 	return ret;
752 }
753 
754 static int gpu_state_release(struct inode *inode, struct file *file)
755 {
756 	i915_gpu_state_put(file->private_data);
757 	return 0;
758 }
759 
760 static int i915_gpu_info_open(struct inode *inode, struct file *file)
761 {
762 	struct drm_i915_private *i915 = inode->i_private;
763 	struct i915_gpu_state *gpu;
764 	intel_wakeref_t wakeref;
765 
766 	gpu = NULL;
767 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
768 		gpu = i915_capture_gpu_state(i915);
769 	if (IS_ERR(gpu))
770 		return PTR_ERR(gpu);
771 
772 	file->private_data = gpu;
773 	return 0;
774 }
775 
776 static const struct file_operations i915_gpu_info_fops = {
777 	.owner = THIS_MODULE,
778 	.open = i915_gpu_info_open,
779 	.read = gpu_state_read,
780 	.llseek = default_llseek,
781 	.release = gpu_state_release,
782 };
783 
784 static ssize_t
785 i915_error_state_write(struct file *filp,
786 		       const char __user *ubuf,
787 		       size_t cnt,
788 		       loff_t *ppos)
789 {
790 	struct i915_gpu_state *error = filp->private_data;
791 
792 	if (!error)
793 		return 0;
794 
795 	DRM_DEBUG_DRIVER("Resetting error state\n");
796 	i915_reset_error_state(error->i915);
797 
798 	return cnt;
799 }
800 
801 static int i915_error_state_open(struct inode *inode, struct file *file)
802 {
803 	struct i915_gpu_state *error;
804 
805 	error = i915_first_error_state(inode->i_private);
806 	if (IS_ERR(error))
807 		return PTR_ERR(error);
808 
809 	file->private_data  = error;
810 	return 0;
811 }
812 
813 static const struct file_operations i915_error_state_fops = {
814 	.owner = THIS_MODULE,
815 	.open = i915_error_state_open,
816 	.read = gpu_state_read,
817 	.write = i915_error_state_write,
818 	.llseek = default_llseek,
819 	.release = gpu_state_release,
820 };
821 #endif
822 
823 static int i915_frequency_info(struct seq_file *m, void *unused)
824 {
825 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
826 	struct intel_uncore *uncore = &dev_priv->uncore;
827 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
828 	intel_wakeref_t wakeref;
829 	int ret = 0;
830 
831 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
832 
833 	if (IS_GEN(dev_priv, 5)) {
834 		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
835 		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
836 
837 		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
838 		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
839 		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
840 			   MEMSTAT_VID_SHIFT);
841 		seq_printf(m, "Current P-state: %d\n",
842 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
843 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
844 		u32 rpmodectl, freq_sts;
845 
846 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
847 		seq_printf(m, "Video Turbo Mode: %s\n",
848 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
849 		seq_printf(m, "HW control enabled: %s\n",
850 			   yesno(rpmodectl & GEN6_RP_ENABLE));
851 		seq_printf(m, "SW control enabled: %s\n",
852 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
853 				  GEN6_RP_MEDIA_SW_MODE));
854 
855 		vlv_punit_get(dev_priv);
856 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
857 		vlv_punit_put(dev_priv);
858 
859 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
860 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
861 
862 		seq_printf(m, "actual GPU freq: %d MHz\n",
863 			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
864 
865 		seq_printf(m, "current GPU freq: %d MHz\n",
866 			   intel_gpu_freq(dev_priv, rps->cur_freq));
867 
868 		seq_printf(m, "max GPU freq: %d MHz\n",
869 			   intel_gpu_freq(dev_priv, rps->max_freq));
870 
871 		seq_printf(m, "min GPU freq: %d MHz\n",
872 			   intel_gpu_freq(dev_priv, rps->min_freq));
873 
874 		seq_printf(m, "idle GPU freq: %d MHz\n",
875 			   intel_gpu_freq(dev_priv, rps->idle_freq));
876 
877 		seq_printf(m,
878 			   "efficient (RPe) frequency: %d MHz\n",
879 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
880 	} else if (INTEL_GEN(dev_priv) >= 6) {
881 		u32 rp_state_limits;
882 		u32 gt_perf_status;
883 		u32 rp_state_cap;
884 		u32 rpmodectl, rpinclimit, rpdeclimit;
885 		u32 rpstat, cagf, reqf;
886 		u32 rpupei, rpcurup, rpprevup;
887 		u32 rpdownei, rpcurdown, rpprevdown;
888 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
889 		int max_freq;
890 
891 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
892 		if (IS_GEN9_LP(dev_priv)) {
893 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
894 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
895 		} else {
896 			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
897 			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
898 		}
899 
900 		/* RPSTAT1 is in the GT power well */
901 		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
902 
903 		reqf = I915_READ(GEN6_RPNSWREQ);
904 		if (INTEL_GEN(dev_priv) >= 9)
905 			reqf >>= 23;
906 		else {
907 			reqf &= ~GEN6_TURBO_DISABLE;
908 			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
909 				reqf >>= 24;
910 			else
911 				reqf >>= 25;
912 		}
913 		reqf = intel_gpu_freq(dev_priv, reqf);
914 
915 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
916 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
917 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
918 
919 		rpstat = I915_READ(GEN6_RPSTAT1);
920 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
921 		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
922 		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
923 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
924 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
925 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
926 		cagf = intel_gpu_freq(dev_priv,
927 				      intel_get_cagf(dev_priv, rpstat));
928 
929 		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
930 
931 		if (INTEL_GEN(dev_priv) >= 11) {
932 			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
933 			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
934 			/*
935 			 * The equivalent to the PM ISR & IIR cannot be read
936 			 * without affecting the current state of the system
937 			 */
938 			pm_isr = 0;
939 			pm_iir = 0;
940 		} else if (INTEL_GEN(dev_priv) >= 8) {
941 			pm_ier = I915_READ(GEN8_GT_IER(2));
942 			pm_imr = I915_READ(GEN8_GT_IMR(2));
943 			pm_isr = I915_READ(GEN8_GT_ISR(2));
944 			pm_iir = I915_READ(GEN8_GT_IIR(2));
945 		} else {
946 			pm_ier = I915_READ(GEN6_PMIER);
947 			pm_imr = I915_READ(GEN6_PMIMR);
948 			pm_isr = I915_READ(GEN6_PMISR);
949 			pm_iir = I915_READ(GEN6_PMIIR);
950 		}
951 		pm_mask = I915_READ(GEN6_PMINTRMSK);
952 
953 		seq_printf(m, "Video Turbo Mode: %s\n",
954 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
955 		seq_printf(m, "HW control enabled: %s\n",
956 			   yesno(rpmodectl & GEN6_RP_ENABLE));
957 		seq_printf(m, "SW control enabled: %s\n",
958 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
959 				  GEN6_RP_MEDIA_SW_MODE));
960 
961 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
962 			   pm_ier, pm_imr, pm_mask);
963 		if (INTEL_GEN(dev_priv) <= 10)
964 			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
965 				   pm_isr, pm_iir);
966 		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
967 			   rps->pm_intrmsk_mbz);
968 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
969 		seq_printf(m, "Render p-state ratio: %d\n",
970 			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
971 		seq_printf(m, "Render p-state VID: %d\n",
972 			   gt_perf_status & 0xff);
973 		seq_printf(m, "Render p-state limit: %d\n",
974 			   rp_state_limits & 0xff);
975 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
976 		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
977 		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
978 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
979 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
980 		seq_printf(m, "CAGF: %dMHz\n", cagf);
981 		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
982 			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
983 		seq_printf(m, "RP CUR UP: %d (%dus)\n",
984 			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
985 		seq_printf(m, "RP PREV UP: %d (%dus)\n",
986 			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
987 		seq_printf(m, "Up threshold: %d%%\n",
988 			   rps->power.up_threshold);
989 
990 		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
991 			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
992 		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
993 			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
994 		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
995 			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
996 		seq_printf(m, "Down threshold: %d%%\n",
997 			   rps->power.down_threshold);
998 
999 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
1000 			    rp_state_cap >> 16) & 0xff;
1001 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1002 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1003 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1004 			   intel_gpu_freq(dev_priv, max_freq));
1005 
1006 		max_freq = (rp_state_cap & 0xff00) >> 8;
1007 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1008 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1009 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1010 			   intel_gpu_freq(dev_priv, max_freq));
1011 
1012 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
1013 			    rp_state_cap >> 0) & 0xff;
1014 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1015 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1016 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1017 			   intel_gpu_freq(dev_priv, max_freq));
1018 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1019 			   intel_gpu_freq(dev_priv, rps->max_freq));
1020 
1021 		seq_printf(m, "Current freq: %d MHz\n",
1022 			   intel_gpu_freq(dev_priv, rps->cur_freq));
1023 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
1024 		seq_printf(m, "Idle freq: %d MHz\n",
1025 			   intel_gpu_freq(dev_priv, rps->idle_freq));
1026 		seq_printf(m, "Min freq: %d MHz\n",
1027 			   intel_gpu_freq(dev_priv, rps->min_freq));
1028 		seq_printf(m, "Boost freq: %d MHz\n",
1029 			   intel_gpu_freq(dev_priv, rps->boost_freq));
1030 		seq_printf(m, "Max freq: %d MHz\n",
1031 			   intel_gpu_freq(dev_priv, rps->max_freq));
1032 		seq_printf(m,
1033 			   "efficient (RPe) frequency: %d MHz\n",
1034 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1035 	} else {
1036 		seq_puts(m, "no P-state info available\n");
1037 	}
1038 
1039 	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1040 	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1041 	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1042 
1043 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1044 	return ret;
1045 }
1046 
1047 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1048 			       struct seq_file *m,
1049 			       struct intel_instdone *instdone)
1050 {
1051 	int slice;
1052 	int subslice;
1053 
1054 	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1055 		   instdone->instdone);
1056 
1057 	if (INTEL_GEN(dev_priv) <= 3)
1058 		return;
1059 
1060 	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1061 		   instdone->slice_common);
1062 
1063 	if (INTEL_GEN(dev_priv) <= 6)
1064 		return;
1065 
1066 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1067 		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1068 			   slice, subslice, instdone->sampler[slice][subslice]);
1069 
1070 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1071 		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1072 			   slice, subslice, instdone->row[slice][subslice]);
1073 }
1074 
1075 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1076 {
1077 	struct drm_i915_private *i915 = node_to_i915(m->private);
1078 	struct intel_gt *gt = &i915->gt;
1079 	struct intel_engine_cs *engine;
1080 	intel_wakeref_t wakeref;
1081 	enum intel_engine_id id;
1082 
1083 	seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
1084 	if (test_bit(I915_WEDGED, &gt->reset.flags))
1085 		seq_puts(m, "\tWedged\n");
1086 	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
1087 		seq_puts(m, "\tDevice (global) reset in progress\n");
1088 
1089 	if (!i915_modparams.enable_hangcheck) {
1090 		seq_puts(m, "Hangcheck disabled\n");
1091 		return 0;
1092 	}
1093 
1094 	if (timer_pending(&gt->hangcheck.work.timer))
1095 		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1096 			   jiffies_to_msecs(gt->hangcheck.work.timer.expires -
1097 					    jiffies));
1098 	else if (delayed_work_pending(&gt->hangcheck.work))
1099 		seq_puts(m, "Hangcheck active, work pending\n");
1100 	else
1101 		seq_puts(m, "Hangcheck inactive\n");
1102 
1103 	seq_printf(m, "GT active? %s\n", yesno(gt->awake));
1104 
1105 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
1106 		for_each_engine(engine, i915, id) {
1107 			struct intel_instdone instdone;
1108 
1109 			seq_printf(m, "%s: %d ms ago\n",
1110 				   engine->name,
1111 				   jiffies_to_msecs(jiffies -
1112 						    engine->hangcheck.action_timestamp));
1113 
1114 			seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1115 				   (long long)engine->hangcheck.acthd,
1116 				   intel_engine_get_active_head(engine));
1117 
1118 			intel_engine_get_instdone(engine, &instdone);
1119 
1120 			seq_puts(m, "\tinstdone read =\n");
1121 			i915_instdone_info(i915, m, &instdone);
1122 
1123 			seq_puts(m, "\tinstdone accu =\n");
1124 			i915_instdone_info(i915, m,
1125 					   &engine->hangcheck.instdone);
1126 		}
1127 	}
1128 
1129 	return 0;
1130 }
1131 
1132 static int ironlake_drpc_info(struct seq_file *m)
1133 {
1134 	struct drm_i915_private *i915 = node_to_i915(m->private);
1135 	struct intel_uncore *uncore = &i915->uncore;
1136 	u32 rgvmodectl, rstdbyctl;
1137 	u16 crstandvid;
1138 
1139 	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
1140 	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
1141 	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
1142 
1143 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1144 	seq_printf(m, "Boost freq: %d\n",
1145 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1146 		   MEMMODE_BOOST_FREQ_SHIFT);
1147 	seq_printf(m, "HW control enabled: %s\n",
1148 		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1149 	seq_printf(m, "SW control enabled: %s\n",
1150 		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1151 	seq_printf(m, "Gated voltage change: %s\n",
1152 		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1153 	seq_printf(m, "Starting frequency: P%d\n",
1154 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1155 	seq_printf(m, "Max P-state: P%d\n",
1156 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1157 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1158 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1159 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1160 	seq_printf(m, "Render standby enabled: %s\n",
1161 		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1162 	seq_puts(m, "Current RS state: ");
1163 	switch (rstdbyctl & RSX_STATUS_MASK) {
1164 	case RSX_STATUS_ON:
1165 		seq_puts(m, "on\n");
1166 		break;
1167 	case RSX_STATUS_RC1:
1168 		seq_puts(m, "RC1\n");
1169 		break;
1170 	case RSX_STATUS_RC1E:
1171 		seq_puts(m, "RC1E\n");
1172 		break;
1173 	case RSX_STATUS_RS1:
1174 		seq_puts(m, "RS1\n");
1175 		break;
1176 	case RSX_STATUS_RS2:
1177 		seq_puts(m, "RS2 (RC6)\n");
1178 		break;
1179 	case RSX_STATUS_RS3:
1180 		seq_puts(m, "RC3 (RC6+)\n");
1181 		break;
1182 	default:
1183 		seq_puts(m, "unknown\n");
1184 		break;
1185 	}
1186 
1187 	return 0;
1188 }
1189 
1190 static int i915_forcewake_domains(struct seq_file *m, void *data)
1191 {
1192 	struct drm_i915_private *i915 = node_to_i915(m->private);
1193 	struct intel_uncore *uncore = &i915->uncore;
1194 	struct intel_uncore_forcewake_domain *fw_domain;
1195 	unsigned int tmp;
1196 
1197 	seq_printf(m, "user.bypass_count = %u\n",
1198 		   uncore->user_forcewake.count);
1199 
1200 	for_each_fw_domain(fw_domain, uncore, tmp)
1201 		seq_printf(m, "%s.wake_count = %u\n",
1202 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
1203 			   READ_ONCE(fw_domain->wake_count));
1204 
1205 	return 0;
1206 }
1207 
1208 static void print_rc6_res(struct seq_file *m,
1209 			  const char *title,
1210 			  const i915_reg_t reg)
1211 {
1212 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1213 
1214 	seq_printf(m, "%s %u (%llu us)\n",
1215 		   title, I915_READ(reg),
1216 		   intel_rc6_residency_us(dev_priv, reg));
1217 }
1218 
1219 static int vlv_drpc_info(struct seq_file *m)
1220 {
1221 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1222 	u32 rcctl1, pw_status;
1223 
1224 	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1225 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1226 
1227 	seq_printf(m, "RC6 Enabled: %s\n",
1228 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1229 					GEN6_RC_CTL_EI_MODE(1))));
1230 	seq_printf(m, "Render Power Well: %s\n",
1231 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1232 	seq_printf(m, "Media Power Well: %s\n",
1233 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1234 
1235 	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1236 	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1237 
1238 	return i915_forcewake_domains(m, NULL);
1239 }
1240 
1241 static int gen6_drpc_info(struct seq_file *m)
1242 {
1243 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1244 	u32 gt_core_status, rcctl1, rc6vids = 0;
1245 	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1246 
1247 	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1248 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1249 
1250 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1251 	if (INTEL_GEN(dev_priv) >= 9) {
1252 		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1253 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1254 	}
1255 
1256 	if (INTEL_GEN(dev_priv) <= 7)
1257 		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1258 				       &rc6vids, NULL);
1259 
1260 	seq_printf(m, "RC1e Enabled: %s\n",
1261 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1262 	seq_printf(m, "RC6 Enabled: %s\n",
1263 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1264 	if (INTEL_GEN(dev_priv) >= 9) {
1265 		seq_printf(m, "Render Well Gating Enabled: %s\n",
1266 			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1267 		seq_printf(m, "Media Well Gating Enabled: %s\n",
1268 			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1269 	}
1270 	seq_printf(m, "Deep RC6 Enabled: %s\n",
1271 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1272 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1273 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1274 	seq_puts(m, "Current RC state: ");
1275 	switch (gt_core_status & GEN6_RCn_MASK) {
1276 	case GEN6_RC0:
1277 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1278 			seq_puts(m, "Core Power Down\n");
1279 		else
1280 			seq_puts(m, "on\n");
1281 		break;
1282 	case GEN6_RC3:
1283 		seq_puts(m, "RC3\n");
1284 		break;
1285 	case GEN6_RC6:
1286 		seq_puts(m, "RC6\n");
1287 		break;
1288 	case GEN6_RC7:
1289 		seq_puts(m, "RC7\n");
1290 		break;
1291 	default:
1292 		seq_puts(m, "Unknown\n");
1293 		break;
1294 	}
1295 
1296 	seq_printf(m, "Core Power Down: %s\n",
1297 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1298 	if (INTEL_GEN(dev_priv) >= 9) {
1299 		seq_printf(m, "Render Power Well: %s\n",
1300 			(gen9_powergate_status &
1301 			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1302 		seq_printf(m, "Media Power Well: %s\n",
1303 			(gen9_powergate_status &
1304 			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1305 	}
1306 
1307 	/* Not exactly sure what this is */
1308 	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1309 		      GEN6_GT_GFX_RC6_LOCKED);
1310 	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1311 	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1312 	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1313 
1314 	if (INTEL_GEN(dev_priv) <= 7) {
1315 		seq_printf(m, "RC6   voltage: %dmV\n",
1316 			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1317 		seq_printf(m, "RC6+  voltage: %dmV\n",
1318 			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1319 		seq_printf(m, "RC6++ voltage: %dmV\n",
1320 			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1321 	}
1322 
1323 	return i915_forcewake_domains(m, NULL);
1324 }
1325 
1326 static int i915_drpc_info(struct seq_file *m, void *unused)
1327 {
1328 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1329 	intel_wakeref_t wakeref;
1330 	int err = -ENODEV;
1331 
1332 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1333 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1334 			err = vlv_drpc_info(m);
1335 		else if (INTEL_GEN(dev_priv) >= 6)
1336 			err = gen6_drpc_info(m);
1337 		else
1338 			err = ironlake_drpc_info(m);
1339 	}
1340 
1341 	return err;
1342 }
1343 
1344 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1345 {
1346 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1347 
1348 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1349 		   dev_priv->fb_tracking.busy_bits);
1350 
1351 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1352 		   dev_priv->fb_tracking.flip_bits);
1353 
1354 	return 0;
1355 }
1356 
1357 static int i915_fbc_status(struct seq_file *m, void *unused)
1358 {
1359 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1360 	struct intel_fbc *fbc = &dev_priv->fbc;
1361 	intel_wakeref_t wakeref;
1362 
1363 	if (!HAS_FBC(dev_priv))
1364 		return -ENODEV;
1365 
1366 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1367 	mutex_lock(&fbc->lock);
1368 
1369 	if (intel_fbc_is_active(dev_priv))
1370 		seq_puts(m, "FBC enabled\n");
1371 	else
1372 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1373 
1374 	if (intel_fbc_is_active(dev_priv)) {
1375 		u32 mask;
1376 
1377 		if (INTEL_GEN(dev_priv) >= 8)
1378 			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1379 		else if (INTEL_GEN(dev_priv) >= 7)
1380 			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1381 		else if (INTEL_GEN(dev_priv) >= 5)
1382 			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1383 		else if (IS_G4X(dev_priv))
1384 			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1385 		else
1386 			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1387 							FBC_STAT_COMPRESSED);
1388 
1389 		seq_printf(m, "Compressing: %s\n", yesno(mask));
1390 	}
1391 
1392 	mutex_unlock(&fbc->lock);
1393 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1394 
1395 	return 0;
1396 }
1397 
1398 static int i915_fbc_false_color_get(void *data, u64 *val)
1399 {
1400 	struct drm_i915_private *dev_priv = data;
1401 
1402 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1403 		return -ENODEV;
1404 
1405 	*val = dev_priv->fbc.false_color;
1406 
1407 	return 0;
1408 }
1409 
1410 static int i915_fbc_false_color_set(void *data, u64 val)
1411 {
1412 	struct drm_i915_private *dev_priv = data;
1413 	u32 reg;
1414 
1415 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1416 		return -ENODEV;
1417 
1418 	mutex_lock(&dev_priv->fbc.lock);
1419 
1420 	reg = I915_READ(ILK_DPFC_CONTROL);
1421 	dev_priv->fbc.false_color = val;
1422 
1423 	I915_WRITE(ILK_DPFC_CONTROL, val ?
1424 		   (reg | FBC_CTL_FALSE_COLOR) :
1425 		   (reg & ~FBC_CTL_FALSE_COLOR));
1426 
1427 	mutex_unlock(&dev_priv->fbc.lock);
1428 	return 0;
1429 }
1430 
1431 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1432 			i915_fbc_false_color_get, i915_fbc_false_color_set,
1433 			"%llu\n");
1434 
1435 static int i915_ips_status(struct seq_file *m, void *unused)
1436 {
1437 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1438 	intel_wakeref_t wakeref;
1439 
1440 	if (!HAS_IPS(dev_priv))
1441 		return -ENODEV;
1442 
1443 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1444 
1445 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1446 		   yesno(i915_modparams.enable_ips));
1447 
1448 	if (INTEL_GEN(dev_priv) >= 8) {
1449 		seq_puts(m, "Currently: unknown\n");
1450 	} else {
1451 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1452 			seq_puts(m, "Currently: enabled\n");
1453 		else
1454 			seq_puts(m, "Currently: disabled\n");
1455 	}
1456 
1457 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1458 
1459 	return 0;
1460 }
1461 
1462 static int i915_sr_status(struct seq_file *m, void *unused)
1463 {
1464 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1465 	intel_wakeref_t wakeref;
1466 	bool sr_enabled = false;
1467 
1468 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1469 
1470 	if (INTEL_GEN(dev_priv) >= 9)
1471 		/* no global SR status; inspect per-plane WM */;
1472 	else if (HAS_PCH_SPLIT(dev_priv))
1473 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1474 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1475 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1476 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1477 	else if (IS_I915GM(dev_priv))
1478 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1479 	else if (IS_PINEVIEW(dev_priv))
1480 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1481 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1482 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1483 
1484 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
1485 
1486 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1487 
1488 	return 0;
1489 }
1490 
1491 static int i915_emon_status(struct seq_file *m, void *unused)
1492 {
1493 	struct drm_i915_private *i915 = node_to_i915(m->private);
1494 	intel_wakeref_t wakeref;
1495 
1496 	if (!IS_GEN(i915, 5))
1497 		return -ENODEV;
1498 
1499 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
1500 		unsigned long temp, chipset, gfx;
1501 
1502 		temp = i915_mch_val(i915);
1503 		chipset = i915_chipset_val(i915);
1504 		gfx = i915_gfx_val(i915);
1505 
1506 		seq_printf(m, "GMCH temp: %ld\n", temp);
1507 		seq_printf(m, "Chipset power: %ld\n", chipset);
1508 		seq_printf(m, "GFX power: %ld\n", gfx);
1509 		seq_printf(m, "Total power: %ld\n", chipset + gfx);
1510 	}
1511 
1512 	return 0;
1513 }
1514 
1515 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1516 {
1517 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1518 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1519 	unsigned int max_gpu_freq, min_gpu_freq;
1520 	intel_wakeref_t wakeref;
1521 	int gpu_freq, ia_freq;
1522 
1523 	if (!HAS_LLC(dev_priv))
1524 		return -ENODEV;
1525 
1526 	min_gpu_freq = rps->min_freq;
1527 	max_gpu_freq = rps->max_freq;
1528 	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1529 		/* Convert GT frequency to 50 HZ units */
1530 		min_gpu_freq /= GEN9_FREQ_SCALER;
1531 		max_gpu_freq /= GEN9_FREQ_SCALER;
1532 	}
1533 
1534 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1535 
1536 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1537 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1538 		ia_freq = gpu_freq;
1539 		sandybridge_pcode_read(dev_priv,
1540 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1541 				       &ia_freq, NULL);
1542 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1543 			   intel_gpu_freq(dev_priv, (gpu_freq *
1544 						     (IS_GEN9_BC(dev_priv) ||
1545 						      INTEL_GEN(dev_priv) >= 10 ?
1546 						      GEN9_FREQ_SCALER : 1))),
1547 			   ((ia_freq >> 0) & 0xff) * 100,
1548 			   ((ia_freq >> 8) & 0xff) * 100);
1549 	}
1550 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1551 
1552 	return 0;
1553 }
1554 
1555 static int i915_opregion(struct seq_file *m, void *unused)
1556 {
1557 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1558 	struct drm_device *dev = &dev_priv->drm;
1559 	struct intel_opregion *opregion = &dev_priv->opregion;
1560 	int ret;
1561 
1562 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1563 	if (ret)
1564 		goto out;
1565 
1566 	if (opregion->header)
1567 		seq_write(m, opregion->header, OPREGION_SIZE);
1568 
1569 	mutex_unlock(&dev->struct_mutex);
1570 
1571 out:
1572 	return 0;
1573 }
1574 
1575 static int i915_vbt(struct seq_file *m, void *unused)
1576 {
1577 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1578 
1579 	if (opregion->vbt)
1580 		seq_write(m, opregion->vbt, opregion->vbt_size);
1581 
1582 	return 0;
1583 }
1584 
1585 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1586 {
1587 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1588 	struct drm_device *dev = &dev_priv->drm;
1589 	struct intel_framebuffer *fbdev_fb = NULL;
1590 	struct drm_framebuffer *drm_fb;
1591 	int ret;
1592 
1593 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1594 	if (ret)
1595 		return ret;
1596 
1597 #ifdef CONFIG_DRM_FBDEV_EMULATION
1598 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1599 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1600 
1601 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1602 			   fbdev_fb->base.width,
1603 			   fbdev_fb->base.height,
1604 			   fbdev_fb->base.format->depth,
1605 			   fbdev_fb->base.format->cpp[0] * 8,
1606 			   fbdev_fb->base.modifier,
1607 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1608 		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1609 		seq_putc(m, '\n');
1610 	}
1611 #endif
1612 
1613 	mutex_lock(&dev->mode_config.fb_lock);
1614 	drm_for_each_fb(drm_fb, dev) {
1615 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1616 		if (fb == fbdev_fb)
1617 			continue;
1618 
1619 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1620 			   fb->base.width,
1621 			   fb->base.height,
1622 			   fb->base.format->depth,
1623 			   fb->base.format->cpp[0] * 8,
1624 			   fb->base.modifier,
1625 			   drm_framebuffer_read_refcount(&fb->base));
1626 		describe_obj(m, intel_fb_obj(&fb->base));
1627 		seq_putc(m, '\n');
1628 	}
1629 	mutex_unlock(&dev->mode_config.fb_lock);
1630 	mutex_unlock(&dev->struct_mutex);
1631 
1632 	return 0;
1633 }
1634 
1635 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1636 {
1637 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1638 		   ring->space, ring->head, ring->tail, ring->emit);
1639 }
1640 
1641 static int i915_context_status(struct seq_file *m, void *unused)
1642 {
1643 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1644 	struct drm_device *dev = &dev_priv->drm;
1645 	struct i915_gem_context *ctx;
1646 	int ret;
1647 
1648 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1649 	if (ret)
1650 		return ret;
1651 
1652 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1653 		struct i915_gem_engines_iter it;
1654 		struct intel_context *ce;
1655 
1656 		seq_puts(m, "HW context ");
1657 		if (!list_empty(&ctx->hw_id_link))
1658 			seq_printf(m, "%x [pin %u]", ctx->hw_id,
1659 				   atomic_read(&ctx->hw_id_pin_count));
1660 		if (ctx->pid) {
1661 			struct task_struct *task;
1662 
1663 			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1664 			if (task) {
1665 				seq_printf(m, "(%s [%d]) ",
1666 					   task->comm, task->pid);
1667 				put_task_struct(task);
1668 			}
1669 		} else if (IS_ERR(ctx->file_priv)) {
1670 			seq_puts(m, "(deleted) ");
1671 		} else {
1672 			seq_puts(m, "(kernel) ");
1673 		}
1674 
1675 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1676 		seq_putc(m, '\n');
1677 
1678 		for_each_gem_engine(ce,
1679 				    i915_gem_context_lock_engines(ctx), it) {
1680 			seq_printf(m, "%s: ", ce->engine->name);
1681 			if (ce->state)
1682 				describe_obj(m, ce->state->obj);
1683 			if (ce->ring)
1684 				describe_ctx_ring(m, ce->ring);
1685 			seq_putc(m, '\n');
1686 		}
1687 		i915_gem_context_unlock_engines(ctx);
1688 
1689 		seq_putc(m, '\n');
1690 	}
1691 
1692 	mutex_unlock(&dev->struct_mutex);
1693 
1694 	return 0;
1695 }
1696 
1697 static const char *swizzle_string(unsigned swizzle)
1698 {
1699 	switch (swizzle) {
1700 	case I915_BIT_6_SWIZZLE_NONE:
1701 		return "none";
1702 	case I915_BIT_6_SWIZZLE_9:
1703 		return "bit9";
1704 	case I915_BIT_6_SWIZZLE_9_10:
1705 		return "bit9/bit10";
1706 	case I915_BIT_6_SWIZZLE_9_11:
1707 		return "bit9/bit11";
1708 	case I915_BIT_6_SWIZZLE_9_10_11:
1709 		return "bit9/bit10/bit11";
1710 	case I915_BIT_6_SWIZZLE_9_17:
1711 		return "bit9/bit17";
1712 	case I915_BIT_6_SWIZZLE_9_10_17:
1713 		return "bit9/bit10/bit17";
1714 	case I915_BIT_6_SWIZZLE_UNKNOWN:
1715 		return "unknown";
1716 	}
1717 
1718 	return "bug";
1719 }
1720 
1721 static int i915_swizzle_info(struct seq_file *m, void *data)
1722 {
1723 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1724 	struct intel_uncore *uncore = &dev_priv->uncore;
1725 	intel_wakeref_t wakeref;
1726 
1727 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1728 
1729 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1730 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1731 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1732 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1733 
1734 	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1735 		seq_printf(m, "DDC = 0x%08x\n",
1736 			   intel_uncore_read(uncore, DCC));
1737 		seq_printf(m, "DDC2 = 0x%08x\n",
1738 			   intel_uncore_read(uncore, DCC2));
1739 		seq_printf(m, "C0DRB3 = 0x%04x\n",
1740 			   intel_uncore_read16(uncore, C0DRB3));
1741 		seq_printf(m, "C1DRB3 = 0x%04x\n",
1742 			   intel_uncore_read16(uncore, C1DRB3));
1743 	} else if (INTEL_GEN(dev_priv) >= 6) {
1744 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1745 			   intel_uncore_read(uncore, MAD_DIMM_C0));
1746 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1747 			   intel_uncore_read(uncore, MAD_DIMM_C1));
1748 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1749 			   intel_uncore_read(uncore, MAD_DIMM_C2));
1750 		seq_printf(m, "TILECTL = 0x%08x\n",
1751 			   intel_uncore_read(uncore, TILECTL));
1752 		if (INTEL_GEN(dev_priv) >= 8)
1753 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1754 				   intel_uncore_read(uncore, GAMTARBMODE));
1755 		else
1756 			seq_printf(m, "ARB_MODE = 0x%08x\n",
1757 				   intel_uncore_read(uncore, ARB_MODE));
1758 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1759 			   intel_uncore_read(uncore, DISP_ARB_CTL));
1760 	}
1761 
1762 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1763 		seq_puts(m, "L-shaped memory detected\n");
1764 
1765 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1766 
1767 	return 0;
1768 }
1769 
1770 static const char *rps_power_to_str(unsigned int power)
1771 {
1772 	static const char * const strings[] = {
1773 		[LOW_POWER] = "low power",
1774 		[BETWEEN] = "mixed",
1775 		[HIGH_POWER] = "high power",
1776 	};
1777 
1778 	if (power >= ARRAY_SIZE(strings) || !strings[power])
1779 		return "unknown";
1780 
1781 	return strings[power];
1782 }
1783 
1784 static int i915_rps_boost_info(struct seq_file *m, void *data)
1785 {
1786 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1787 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1788 	u32 act_freq = rps->cur_freq;
1789 	intel_wakeref_t wakeref;
1790 
1791 	with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
1792 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1793 			vlv_punit_get(dev_priv);
1794 			act_freq = vlv_punit_read(dev_priv,
1795 						  PUNIT_REG_GPU_FREQ_STS);
1796 			vlv_punit_put(dev_priv);
1797 			act_freq = (act_freq >> 8) & 0xff;
1798 		} else {
1799 			act_freq = intel_get_cagf(dev_priv,
1800 						  I915_READ(GEN6_RPSTAT1));
1801 		}
1802 	}
1803 
1804 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
1805 	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
1806 	seq_printf(m, "Boosts outstanding? %d\n",
1807 		   atomic_read(&rps->num_waiters));
1808 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
1809 	seq_printf(m, "Frequency requested %d, actual %d\n",
1810 		   intel_gpu_freq(dev_priv, rps->cur_freq),
1811 		   intel_gpu_freq(dev_priv, act_freq));
1812 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
1813 		   intel_gpu_freq(dev_priv, rps->min_freq),
1814 		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
1815 		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
1816 		   intel_gpu_freq(dev_priv, rps->max_freq));
1817 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
1818 		   intel_gpu_freq(dev_priv, rps->idle_freq),
1819 		   intel_gpu_freq(dev_priv, rps->efficient_freq),
1820 		   intel_gpu_freq(dev_priv, rps->boost_freq));
1821 
1822 	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
1823 
1824 	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
1825 		u32 rpup, rpupei;
1826 		u32 rpdown, rpdownei;
1827 
1828 		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1829 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
1830 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
1831 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
1832 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
1833 		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1834 
1835 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
1836 			   rps_power_to_str(rps->power.mode));
1837 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
1838 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
1839 			   rps->power.up_threshold);
1840 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
1841 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
1842 			   rps->power.down_threshold);
1843 	} else {
1844 		seq_puts(m, "\nRPS Autotuning inactive\n");
1845 	}
1846 
1847 	return 0;
1848 }
1849 
1850 static int i915_llc(struct seq_file *m, void *data)
1851 {
1852 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1853 	const bool edram = INTEL_GEN(dev_priv) > 8;
1854 
1855 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
1856 	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1857 		   dev_priv->edram_size_mb);
1858 
1859 	return 0;
1860 }
1861 
1862 static int i915_huc_load_status_info(struct seq_file *m, void *data)
1863 {
1864 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1865 	intel_wakeref_t wakeref;
1866 	struct drm_printer p;
1867 
1868 	if (!HAS_GT_UC(dev_priv))
1869 		return -ENODEV;
1870 
1871 	p = drm_seq_file_printer(m);
1872 	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
1873 
1874 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
1875 		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
1876 
1877 	return 0;
1878 }
1879 
1880 static int i915_guc_load_status_info(struct seq_file *m, void *data)
1881 {
1882 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1883 	intel_wakeref_t wakeref;
1884 	struct drm_printer p;
1885 
1886 	if (!HAS_GT_UC(dev_priv))
1887 		return -ENODEV;
1888 
1889 	p = drm_seq_file_printer(m);
1890 	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
1891 
1892 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1893 		u32 tmp = I915_READ(GUC_STATUS);
1894 		u32 i;
1895 
1896 		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
1897 		seq_printf(m, "\tBootrom status = 0x%x\n",
1898 			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
1899 		seq_printf(m, "\tuKernel status = 0x%x\n",
1900 			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
1901 		seq_printf(m, "\tMIA Core status = 0x%x\n",
1902 			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
1903 		seq_puts(m, "\nScratch registers:\n");
1904 		for (i = 0; i < 16; i++) {
1905 			seq_printf(m, "\t%2d: \t0x%x\n",
1906 				   i, I915_READ(SOFT_SCRATCH(i)));
1907 		}
1908 	}
1909 
1910 	return 0;
1911 }
1912 
1913 static const char *
1914 stringify_guc_log_type(enum guc_log_buffer_type type)
1915 {
1916 	switch (type) {
1917 	case GUC_ISR_LOG_BUFFER:
1918 		return "ISR";
1919 	case GUC_DPC_LOG_BUFFER:
1920 		return "DPC";
1921 	case GUC_CRASH_DUMP_LOG_BUFFER:
1922 		return "CRASH";
1923 	default:
1924 		MISSING_CASE(type);
1925 	}
1926 
1927 	return "";
1928 }
1929 
1930 static void i915_guc_log_info(struct seq_file *m,
1931 			      struct drm_i915_private *dev_priv)
1932 {
1933 	struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
1934 	enum guc_log_buffer_type type;
1935 
1936 	if (!intel_guc_log_relay_enabled(log)) {
1937 		seq_puts(m, "GuC log relay disabled\n");
1938 		return;
1939 	}
1940 
1941 	seq_puts(m, "GuC logging stats:\n");
1942 
1943 	seq_printf(m, "\tRelay full count: %u\n",
1944 		   log->relay.full_count);
1945 
1946 	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1947 		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1948 			   stringify_guc_log_type(type),
1949 			   log->stats[type].flush,
1950 			   log->stats[type].sampled_overflow);
1951 	}
1952 }
1953 
1954 static void i915_guc_client_info(struct seq_file *m,
1955 				 struct drm_i915_private *dev_priv,
1956 				 struct intel_guc_client *client)
1957 {
1958 	struct intel_engine_cs *engine;
1959 	enum intel_engine_id id;
1960 	u64 tot = 0;
1961 
1962 	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
1963 		client->priority, client->stage_id, client->proc_desc_offset);
1964 	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
1965 		client->doorbell_id, client->doorbell_offset);
1966 
1967 	for_each_engine(engine, dev_priv, id) {
1968 		u64 submissions = client->submissions[id];
1969 		tot += submissions;
1970 		seq_printf(m, "\tSubmissions: %llu %s\n",
1971 				submissions, engine->name);
1972 	}
1973 	seq_printf(m, "\tTotal: %llu\n", tot);
1974 }
1975 
1976 static int i915_guc_info(struct seq_file *m, void *data)
1977 {
1978 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1979 	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
1980 
1981 	if (!USES_GUC(dev_priv))
1982 		return -ENODEV;
1983 
1984 	i915_guc_log_info(m, dev_priv);
1985 
1986 	if (!USES_GUC_SUBMISSION(dev_priv))
1987 		return 0;
1988 
1989 	GEM_BUG_ON(!guc->execbuf_client);
1990 
1991 	seq_printf(m, "\nDoorbell map:\n");
1992 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
1993 	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
1994 
1995 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
1996 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
1997 
1998 	/* Add more as required ... */
1999 
2000 	return 0;
2001 }
2002 
2003 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2004 {
2005 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2006 	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
2007 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2008 	intel_engine_mask_t tmp;
2009 	int index;
2010 
2011 	if (!USES_GUC_SUBMISSION(dev_priv))
2012 		return -ENODEV;
2013 
2014 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2015 		struct intel_engine_cs *engine;
2016 
2017 		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2018 			continue;
2019 
2020 		seq_printf(m, "GuC stage descriptor %u:\n", index);
2021 		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2022 		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2023 		seq_printf(m, "\tPriority: %d\n", desc->priority);
2024 		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2025 		seq_printf(m, "\tEngines used: 0x%x\n",
2026 			   desc->engines_used);
2027 		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2028 			   desc->db_trigger_phy,
2029 			   desc->db_trigger_cpu,
2030 			   desc->db_trigger_uk);
2031 		seq_printf(m, "\tProcess descriptor: 0x%x\n",
2032 			   desc->process_desc);
2033 		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2034 			   desc->wq_addr, desc->wq_size);
2035 		seq_putc(m, '\n');
2036 
2037 		for_each_engine(engine, dev_priv, tmp) {
2038 			u32 guc_engine_id = engine->guc_id;
2039 			struct guc_execlist_context *lrc =
2040 						&desc->lrc[guc_engine_id];
2041 
2042 			seq_printf(m, "\t%s LRC:\n", engine->name);
2043 			seq_printf(m, "\t\tContext desc: 0x%x\n",
2044 				   lrc->context_desc);
2045 			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2046 			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2047 			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2048 			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2049 			seq_putc(m, '\n');
2050 		}
2051 	}
2052 
2053 	return 0;
2054 }
2055 
2056 static int i915_guc_log_dump(struct seq_file *m, void *data)
2057 {
2058 	struct drm_info_node *node = m->private;
2059 	struct drm_i915_private *dev_priv = node_to_i915(node);
2060 	bool dump_load_err = !!node->info_ent->data;
2061 	struct drm_i915_gem_object *obj = NULL;
2062 	u32 *log;
2063 	int i = 0;
2064 
2065 	if (!HAS_GT_UC(dev_priv))
2066 		return -ENODEV;
2067 
2068 	if (dump_load_err)
2069 		obj = dev_priv->gt.uc.guc.load_err_log;
2070 	else if (dev_priv->gt.uc.guc.log.vma)
2071 		obj = dev_priv->gt.uc.guc.log.vma->obj;
2072 
2073 	if (!obj)
2074 		return 0;
2075 
2076 	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2077 	if (IS_ERR(log)) {
2078 		DRM_DEBUG("Failed to pin object\n");
2079 		seq_puts(m, "(log data unaccessible)\n");
2080 		return PTR_ERR(log);
2081 	}
2082 
2083 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2084 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2085 			   *(log + i), *(log + i + 1),
2086 			   *(log + i + 2), *(log + i + 3));
2087 
2088 	seq_putc(m, '\n');
2089 
2090 	i915_gem_object_unpin_map(obj);
2091 
2092 	return 0;
2093 }
2094 
2095 static int i915_guc_log_level_get(void *data, u64 *val)
2096 {
2097 	struct drm_i915_private *dev_priv = data;
2098 
2099 	if (!USES_GUC(dev_priv))
2100 		return -ENODEV;
2101 
2102 	*val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
2103 
2104 	return 0;
2105 }
2106 
2107 static int i915_guc_log_level_set(void *data, u64 val)
2108 {
2109 	struct drm_i915_private *dev_priv = data;
2110 
2111 	if (!USES_GUC(dev_priv))
2112 		return -ENODEV;
2113 
2114 	return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
2115 }
2116 
2117 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2118 			i915_guc_log_level_get, i915_guc_log_level_set,
2119 			"%lld\n");
2120 
2121 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2122 {
2123 	struct drm_i915_private *dev_priv = inode->i_private;
2124 
2125 	if (!USES_GUC(dev_priv))
2126 		return -ENODEV;
2127 
2128 	file->private_data = &dev_priv->gt.uc.guc.log;
2129 
2130 	return intel_guc_log_relay_open(&dev_priv->gt.uc.guc.log);
2131 }
2132 
2133 static ssize_t
2134 i915_guc_log_relay_write(struct file *filp,
2135 			 const char __user *ubuf,
2136 			 size_t cnt,
2137 			 loff_t *ppos)
2138 {
2139 	struct intel_guc_log *log = filp->private_data;
2140 
2141 	intel_guc_log_relay_flush(log);
2142 
2143 	return cnt;
2144 }
2145 
2146 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2147 {
2148 	struct drm_i915_private *dev_priv = inode->i_private;
2149 
2150 	intel_guc_log_relay_close(&dev_priv->gt.uc.guc.log);
2151 
2152 	return 0;
2153 }
2154 
2155 static const struct file_operations i915_guc_log_relay_fops = {
2156 	.owner = THIS_MODULE,
2157 	.open = i915_guc_log_relay_open,
2158 	.write = i915_guc_log_relay_write,
2159 	.release = i915_guc_log_relay_release,
2160 };
2161 
2162 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2163 {
2164 	u8 val;
2165 	static const char * const sink_status[] = {
2166 		"inactive",
2167 		"transition to active, capture and display",
2168 		"active, display from RFB",
2169 		"active, capture and display on sink device timings",
2170 		"transition to inactive, capture and display, timing re-sync",
2171 		"reserved",
2172 		"reserved",
2173 		"sink internal error",
2174 	};
2175 	struct drm_connector *connector = m->private;
2176 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2177 	struct intel_dp *intel_dp =
2178 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2179 	int ret;
2180 
2181 	if (!CAN_PSR(dev_priv)) {
2182 		seq_puts(m, "PSR Unsupported\n");
2183 		return -ENODEV;
2184 	}
2185 
2186 	if (connector->status != connector_status_connected)
2187 		return -ENODEV;
2188 
2189 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2190 
2191 	if (ret == 1) {
2192 		const char *str = "unknown";
2193 
2194 		val &= DP_PSR_SINK_STATE_MASK;
2195 		if (val < ARRAY_SIZE(sink_status))
2196 			str = sink_status[val];
2197 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2198 	} else {
2199 		return ret;
2200 	}
2201 
2202 	return 0;
2203 }
2204 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2205 
2206 static void
2207 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2208 {
2209 	u32 val, status_val;
2210 	const char *status = "unknown";
2211 
2212 	if (dev_priv->psr.psr2_enabled) {
2213 		static const char * const live_status[] = {
2214 			"IDLE",
2215 			"CAPTURE",
2216 			"CAPTURE_FS",
2217 			"SLEEP",
2218 			"BUFON_FW",
2219 			"ML_UP",
2220 			"SU_STANDBY",
2221 			"FAST_SLEEP",
2222 			"DEEP_SLEEP",
2223 			"BUF_ON",
2224 			"TG_ON"
2225 		};
2226 		val = I915_READ(EDP_PSR2_STATUS);
2227 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2228 			      EDP_PSR2_STATUS_STATE_SHIFT;
2229 		if (status_val < ARRAY_SIZE(live_status))
2230 			status = live_status[status_val];
2231 	} else {
2232 		static const char * const live_status[] = {
2233 			"IDLE",
2234 			"SRDONACK",
2235 			"SRDENT",
2236 			"BUFOFF",
2237 			"BUFON",
2238 			"AUXACK",
2239 			"SRDOFFACK",
2240 			"SRDENT_ON",
2241 		};
2242 		val = I915_READ(EDP_PSR_STATUS);
2243 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2244 			      EDP_PSR_STATUS_STATE_SHIFT;
2245 		if (status_val < ARRAY_SIZE(live_status))
2246 			status = live_status[status_val];
2247 	}
2248 
2249 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2250 }
2251 
2252 static int i915_edp_psr_status(struct seq_file *m, void *data)
2253 {
2254 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2255 	struct i915_psr *psr = &dev_priv->psr;
2256 	intel_wakeref_t wakeref;
2257 	const char *status;
2258 	bool enabled;
2259 	u32 val;
2260 
2261 	if (!HAS_PSR(dev_priv))
2262 		return -ENODEV;
2263 
2264 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2265 	if (psr->dp)
2266 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2267 	seq_puts(m, "\n");
2268 
2269 	if (!psr->sink_support)
2270 		return 0;
2271 
2272 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2273 	mutex_lock(&psr->lock);
2274 
2275 	if (psr->enabled)
2276 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2277 	else
2278 		status = "disabled";
2279 	seq_printf(m, "PSR mode: %s\n", status);
2280 
2281 	if (!psr->enabled)
2282 		goto unlock;
2283 
2284 	if (psr->psr2_enabled) {
2285 		val = I915_READ(EDP_PSR2_CTL);
2286 		enabled = val & EDP_PSR2_ENABLE;
2287 	} else {
2288 		val = I915_READ(EDP_PSR_CTL);
2289 		enabled = val & EDP_PSR_ENABLE;
2290 	}
2291 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2292 		   enableddisabled(enabled), val);
2293 	psr_source_status(dev_priv, m);
2294 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2295 		   psr->busy_frontbuffer_bits);
2296 
2297 	/*
2298 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2299 	 */
2300 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2301 		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2302 		seq_printf(m, "Performance counter: %u\n", val);
2303 	}
2304 
2305 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
2306 		seq_printf(m, "Last attempted entry at: %lld\n",
2307 			   psr->last_entry_attempt);
2308 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2309 	}
2310 
2311 	if (psr->psr2_enabled) {
2312 		u32 su_frames_val[3];
2313 		int frame;
2314 
2315 		/*
2316 		 * Reading all 3 registers before hand to minimize crossing a
2317 		 * frame boundary between register reads
2318 		 */
2319 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2320 			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2321 
2322 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2323 
2324 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2325 			u32 su_blocks;
2326 
2327 			su_blocks = su_frames_val[frame / 3] &
2328 				    PSR2_SU_STATUS_MASK(frame);
2329 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2330 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
2331 		}
2332 	}
2333 
2334 unlock:
2335 	mutex_unlock(&psr->lock);
2336 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2337 
2338 	return 0;
2339 }
2340 
2341 static int
2342 i915_edp_psr_debug_set(void *data, u64 val)
2343 {
2344 	struct drm_i915_private *dev_priv = data;
2345 	intel_wakeref_t wakeref;
2346 	int ret;
2347 
2348 	if (!CAN_PSR(dev_priv))
2349 		return -ENODEV;
2350 
2351 	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2352 
2353 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2354 
2355 	ret = intel_psr_debug_set(dev_priv, val);
2356 
2357 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2358 
2359 	return ret;
2360 }
2361 
2362 static int
2363 i915_edp_psr_debug_get(void *data, u64 *val)
2364 {
2365 	struct drm_i915_private *dev_priv = data;
2366 
2367 	if (!CAN_PSR(dev_priv))
2368 		return -ENODEV;
2369 
2370 	*val = READ_ONCE(dev_priv->psr.debug);
2371 	return 0;
2372 }
2373 
2374 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2375 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2376 			"%llu\n");
2377 
2378 static int i915_energy_uJ(struct seq_file *m, void *data)
2379 {
2380 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2381 	unsigned long long power;
2382 	intel_wakeref_t wakeref;
2383 	u32 units;
2384 
2385 	if (INTEL_GEN(dev_priv) < 6)
2386 		return -ENODEV;
2387 
2388 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2389 		return -ENODEV;
2390 
2391 	units = (power & 0x1f00) >> 8;
2392 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2393 		power = I915_READ(MCH_SECP_NRG_STTS);
2394 
2395 	power = (1000000 * power) >> units; /* convert to uJ */
2396 	seq_printf(m, "%llu", power);
2397 
2398 	return 0;
2399 }
2400 
2401 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2402 {
2403 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2404 	struct pci_dev *pdev = dev_priv->drm.pdev;
2405 
2406 	if (!HAS_RUNTIME_PM(dev_priv))
2407 		seq_puts(m, "Runtime power management not supported\n");
2408 
2409 	seq_printf(m, "Runtime power status: %s\n",
2410 		   enableddisabled(!dev_priv->power_domains.wakeref));
2411 
2412 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2413 	seq_printf(m, "IRQs disabled: %s\n",
2414 		   yesno(!intel_irqs_enabled(dev_priv)));
2415 #ifdef CONFIG_PM
2416 	seq_printf(m, "Usage count: %d\n",
2417 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2418 #else
2419 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2420 #endif
2421 	seq_printf(m, "PCI device power state: %s [%d]\n",
2422 		   pci_power_name(pdev->current_state),
2423 		   pdev->current_state);
2424 
2425 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2426 		struct drm_printer p = drm_seq_file_printer(m);
2427 
2428 		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
2429 	}
2430 
2431 	return 0;
2432 }
2433 
2434 static int i915_power_domain_info(struct seq_file *m, void *unused)
2435 {
2436 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2437 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2438 	int i;
2439 
2440 	mutex_lock(&power_domains->lock);
2441 
2442 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2443 	for (i = 0; i < power_domains->power_well_count; i++) {
2444 		struct i915_power_well *power_well;
2445 		enum intel_display_power_domain power_domain;
2446 
2447 		power_well = &power_domains->power_wells[i];
2448 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
2449 			   power_well->count);
2450 
2451 		for_each_power_domain(power_domain, power_well->desc->domains)
2452 			seq_printf(m, "  %-23s %d\n",
2453 				 intel_display_power_domain_str(dev_priv,
2454 								power_domain),
2455 				 power_domains->domain_use_count[power_domain]);
2456 	}
2457 
2458 	mutex_unlock(&power_domains->lock);
2459 
2460 	return 0;
2461 }
2462 
2463 static int i915_dmc_info(struct seq_file *m, void *unused)
2464 {
2465 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2466 	intel_wakeref_t wakeref;
2467 	struct intel_csr *csr;
2468 
2469 	if (!HAS_CSR(dev_priv))
2470 		return -ENODEV;
2471 
2472 	csr = &dev_priv->csr;
2473 
2474 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2475 
2476 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2477 	seq_printf(m, "path: %s\n", csr->fw_path);
2478 
2479 	if (!csr->dmc_payload)
2480 		goto out;
2481 
2482 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2483 		   CSR_VERSION_MINOR(csr->version));
2484 
2485 	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2486 		goto out;
2487 
2488 	seq_printf(m, "DC3 -> DC5 count: %d\n",
2489 		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2490 						    SKL_CSR_DC3_DC5_COUNT));
2491 	if (!IS_GEN9_LP(dev_priv))
2492 		seq_printf(m, "DC5 -> DC6 count: %d\n",
2493 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2494 
2495 out:
2496 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2497 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2498 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2499 
2500 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2501 
2502 	return 0;
2503 }
2504 
2505 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2506 				 struct drm_display_mode *mode)
2507 {
2508 	int i;
2509 
2510 	for (i = 0; i < tabs; i++)
2511 		seq_putc(m, '\t');
2512 
2513 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2514 }
2515 
2516 static void intel_encoder_info(struct seq_file *m,
2517 			       struct intel_crtc *intel_crtc,
2518 			       struct intel_encoder *intel_encoder)
2519 {
2520 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2521 	struct drm_device *dev = &dev_priv->drm;
2522 	struct drm_crtc *crtc = &intel_crtc->base;
2523 	struct intel_connector *intel_connector;
2524 	struct drm_encoder *encoder;
2525 
2526 	encoder = &intel_encoder->base;
2527 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2528 		   encoder->base.id, encoder->name);
2529 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2530 		struct drm_connector *connector = &intel_connector->base;
2531 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2532 			   connector->base.id,
2533 			   connector->name,
2534 			   drm_get_connector_status_name(connector->status));
2535 		if (connector->status == connector_status_connected) {
2536 			struct drm_display_mode *mode = &crtc->mode;
2537 			seq_printf(m, ", mode:\n");
2538 			intel_seq_print_mode(m, 2, mode);
2539 		} else {
2540 			seq_putc(m, '\n');
2541 		}
2542 	}
2543 }
2544 
2545 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2546 {
2547 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2548 	struct drm_device *dev = &dev_priv->drm;
2549 	struct drm_crtc *crtc = &intel_crtc->base;
2550 	struct intel_encoder *intel_encoder;
2551 	struct drm_plane_state *plane_state = crtc->primary->state;
2552 	struct drm_framebuffer *fb = plane_state->fb;
2553 
2554 	if (fb)
2555 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2556 			   fb->base.id, plane_state->src_x >> 16,
2557 			   plane_state->src_y >> 16, fb->width, fb->height);
2558 	else
2559 		seq_puts(m, "\tprimary plane disabled\n");
2560 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2561 		intel_encoder_info(m, intel_crtc, intel_encoder);
2562 }
2563 
2564 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2565 {
2566 	struct drm_display_mode *mode = panel->fixed_mode;
2567 
2568 	seq_printf(m, "\tfixed mode:\n");
2569 	intel_seq_print_mode(m, 2, mode);
2570 }
2571 
2572 static void intel_hdcp_info(struct seq_file *m,
2573 			    struct intel_connector *intel_connector)
2574 {
2575 	bool hdcp_cap, hdcp2_cap;
2576 
2577 	hdcp_cap = intel_hdcp_capable(intel_connector);
2578 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
2579 
2580 	if (hdcp_cap)
2581 		seq_puts(m, "HDCP1.4 ");
2582 	if (hdcp2_cap)
2583 		seq_puts(m, "HDCP2.2 ");
2584 
2585 	if (!hdcp_cap && !hdcp2_cap)
2586 		seq_puts(m, "None");
2587 
2588 	seq_puts(m, "\n");
2589 }
2590 
2591 static void intel_dp_info(struct seq_file *m,
2592 			  struct intel_connector *intel_connector)
2593 {
2594 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2595 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2596 
2597 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2598 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2599 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2600 		intel_panel_info(m, &intel_connector->panel);
2601 
2602 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2603 				&intel_dp->aux);
2604 	if (intel_connector->hdcp.shim) {
2605 		seq_puts(m, "\tHDCP version: ");
2606 		intel_hdcp_info(m, intel_connector);
2607 	}
2608 }
2609 
2610 static void intel_dp_mst_info(struct seq_file *m,
2611 			  struct intel_connector *intel_connector)
2612 {
2613 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2614 	struct intel_dp_mst_encoder *intel_mst =
2615 		enc_to_mst(&intel_encoder->base);
2616 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
2617 	struct intel_dp *intel_dp = &intel_dig_port->dp;
2618 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2619 					intel_connector->port);
2620 
2621 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2622 }
2623 
2624 static void intel_hdmi_info(struct seq_file *m,
2625 			    struct intel_connector *intel_connector)
2626 {
2627 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2628 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2629 
2630 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2631 	if (intel_connector->hdcp.shim) {
2632 		seq_puts(m, "\tHDCP version: ");
2633 		intel_hdcp_info(m, intel_connector);
2634 	}
2635 }
2636 
2637 static void intel_lvds_info(struct seq_file *m,
2638 			    struct intel_connector *intel_connector)
2639 {
2640 	intel_panel_info(m, &intel_connector->panel);
2641 }
2642 
2643 static void intel_connector_info(struct seq_file *m,
2644 				 struct drm_connector *connector)
2645 {
2646 	struct intel_connector *intel_connector = to_intel_connector(connector);
2647 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2648 	struct drm_display_mode *mode;
2649 
2650 	seq_printf(m, "connector %d: type %s, status: %s\n",
2651 		   connector->base.id, connector->name,
2652 		   drm_get_connector_status_name(connector->status));
2653 
2654 	if (connector->status == connector_status_disconnected)
2655 		return;
2656 
2657 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2658 		   connector->display_info.width_mm,
2659 		   connector->display_info.height_mm);
2660 	seq_printf(m, "\tsubpixel order: %s\n",
2661 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2662 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2663 
2664 	if (!intel_encoder)
2665 		return;
2666 
2667 	switch (connector->connector_type) {
2668 	case DRM_MODE_CONNECTOR_DisplayPort:
2669 	case DRM_MODE_CONNECTOR_eDP:
2670 		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2671 			intel_dp_mst_info(m, intel_connector);
2672 		else
2673 			intel_dp_info(m, intel_connector);
2674 		break;
2675 	case DRM_MODE_CONNECTOR_LVDS:
2676 		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2677 			intel_lvds_info(m, intel_connector);
2678 		break;
2679 	case DRM_MODE_CONNECTOR_HDMIA:
2680 		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2681 		    intel_encoder->type == INTEL_OUTPUT_DDI)
2682 			intel_hdmi_info(m, intel_connector);
2683 		break;
2684 	default:
2685 		break;
2686 	}
2687 
2688 	seq_printf(m, "\tmodes:\n");
2689 	list_for_each_entry(mode, &connector->modes, head)
2690 		intel_seq_print_mode(m, 2, mode);
2691 }
2692 
2693 static const char *plane_type(enum drm_plane_type type)
2694 {
2695 	switch (type) {
2696 	case DRM_PLANE_TYPE_OVERLAY:
2697 		return "OVL";
2698 	case DRM_PLANE_TYPE_PRIMARY:
2699 		return "PRI";
2700 	case DRM_PLANE_TYPE_CURSOR:
2701 		return "CUR";
2702 	/*
2703 	 * Deliberately omitting default: to generate compiler warnings
2704 	 * when a new drm_plane_type gets added.
2705 	 */
2706 	}
2707 
2708 	return "unknown";
2709 }
2710 
2711 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2712 {
2713 	/*
2714 	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
2715 	 * will print them all to visualize if the values are misused
2716 	 */
2717 	snprintf(buf, bufsize,
2718 		 "%s%s%s%s%s%s(0x%08x)",
2719 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2720 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2721 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2722 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2723 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2724 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2725 		 rotation);
2726 }
2727 
2728 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2729 {
2730 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2731 	struct drm_device *dev = &dev_priv->drm;
2732 	struct intel_plane *intel_plane;
2733 
2734 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2735 		struct drm_plane_state *state;
2736 		struct drm_plane *plane = &intel_plane->base;
2737 		struct drm_format_name_buf format_name;
2738 		char rot_str[48];
2739 
2740 		if (!plane->state) {
2741 			seq_puts(m, "plane->state is NULL!\n");
2742 			continue;
2743 		}
2744 
2745 		state = plane->state;
2746 
2747 		if (state->fb) {
2748 			drm_get_format_name(state->fb->format->format,
2749 					    &format_name);
2750 		} else {
2751 			sprintf(format_name.str, "N/A");
2752 		}
2753 
2754 		plane_rotation(rot_str, sizeof(rot_str), state->rotation);
2755 
2756 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2757 			   plane->base.id,
2758 			   plane_type(intel_plane->base.type),
2759 			   state->crtc_x, state->crtc_y,
2760 			   state->crtc_w, state->crtc_h,
2761 			   (state->src_x >> 16),
2762 			   ((state->src_x & 0xffff) * 15625) >> 10,
2763 			   (state->src_y >> 16),
2764 			   ((state->src_y & 0xffff) * 15625) >> 10,
2765 			   (state->src_w >> 16),
2766 			   ((state->src_w & 0xffff) * 15625) >> 10,
2767 			   (state->src_h >> 16),
2768 			   ((state->src_h & 0xffff) * 15625) >> 10,
2769 			   format_name.str,
2770 			   rot_str);
2771 	}
2772 }
2773 
2774 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2775 {
2776 	struct intel_crtc_state *pipe_config;
2777 	int num_scalers = intel_crtc->num_scalers;
2778 	int i;
2779 
2780 	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2781 
2782 	/* Not all platformas have a scaler */
2783 	if (num_scalers) {
2784 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2785 			   num_scalers,
2786 			   pipe_config->scaler_state.scaler_users,
2787 			   pipe_config->scaler_state.scaler_id);
2788 
2789 		for (i = 0; i < num_scalers; i++) {
2790 			struct intel_scaler *sc =
2791 					&pipe_config->scaler_state.scalers[i];
2792 
2793 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2794 				   i, yesno(sc->in_use), sc->mode);
2795 		}
2796 		seq_puts(m, "\n");
2797 	} else {
2798 		seq_puts(m, "\tNo scalers available on this platform\n");
2799 	}
2800 }
2801 
2802 static int i915_display_info(struct seq_file *m, void *unused)
2803 {
2804 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2805 	struct drm_device *dev = &dev_priv->drm;
2806 	struct intel_crtc *crtc;
2807 	struct drm_connector *connector;
2808 	struct drm_connector_list_iter conn_iter;
2809 	intel_wakeref_t wakeref;
2810 
2811 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2812 
2813 	seq_printf(m, "CRTC info\n");
2814 	seq_printf(m, "---------\n");
2815 	for_each_intel_crtc(dev, crtc) {
2816 		struct intel_crtc_state *pipe_config;
2817 
2818 		drm_modeset_lock(&crtc->base.mutex, NULL);
2819 		pipe_config = to_intel_crtc_state(crtc->base.state);
2820 
2821 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
2822 			   crtc->base.base.id, pipe_name(crtc->pipe),
2823 			   yesno(pipe_config->base.active),
2824 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
2825 			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
2826 
2827 		if (pipe_config->base.active) {
2828 			struct intel_plane *cursor =
2829 				to_intel_plane(crtc->base.cursor);
2830 
2831 			intel_crtc_info(m, crtc);
2832 
2833 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
2834 				   yesno(cursor->base.state->visible),
2835 				   cursor->base.state->crtc_x,
2836 				   cursor->base.state->crtc_y,
2837 				   cursor->base.state->crtc_w,
2838 				   cursor->base.state->crtc_h,
2839 				   cursor->cursor.base);
2840 			intel_scaler_info(m, crtc);
2841 			intel_plane_info(m, crtc);
2842 		}
2843 
2844 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2845 			   yesno(!crtc->cpu_fifo_underrun_disabled),
2846 			   yesno(!crtc->pch_fifo_underrun_disabled));
2847 		drm_modeset_unlock(&crtc->base.mutex);
2848 	}
2849 
2850 	seq_printf(m, "\n");
2851 	seq_printf(m, "Connector info\n");
2852 	seq_printf(m, "--------------\n");
2853 	mutex_lock(&dev->mode_config.mutex);
2854 	drm_connector_list_iter_begin(dev, &conn_iter);
2855 	drm_for_each_connector_iter(connector, &conn_iter)
2856 		intel_connector_info(m, connector);
2857 	drm_connector_list_iter_end(&conn_iter);
2858 	mutex_unlock(&dev->mode_config.mutex);
2859 
2860 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2861 
2862 	return 0;
2863 }
2864 
2865 static int i915_engine_info(struct seq_file *m, void *unused)
2866 {
2867 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2868 	struct intel_engine_cs *engine;
2869 	intel_wakeref_t wakeref;
2870 	enum intel_engine_id id;
2871 	struct drm_printer p;
2872 
2873 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2874 
2875 	seq_printf(m, "GT awake? %s [%d]\n",
2876 		   yesno(dev_priv->gt.awake),
2877 		   atomic_read(&dev_priv->gt.wakeref.count));
2878 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
2879 		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
2880 
2881 	p = drm_seq_file_printer(m);
2882 	for_each_engine(engine, dev_priv, id)
2883 		intel_engine_dump(engine, &p, "%s\n", engine->name);
2884 
2885 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2886 
2887 	return 0;
2888 }
2889 
2890 static int i915_rcs_topology(struct seq_file *m, void *unused)
2891 {
2892 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2893 	struct drm_printer p = drm_seq_file_printer(m);
2894 
2895 	intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
2896 
2897 	return 0;
2898 }
2899 
2900 static int i915_shrinker_info(struct seq_file *m, void *unused)
2901 {
2902 	struct drm_i915_private *i915 = node_to_i915(m->private);
2903 
2904 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
2905 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
2906 
2907 	return 0;
2908 }
2909 
2910 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2911 {
2912 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2913 	struct drm_device *dev = &dev_priv->drm;
2914 	int i;
2915 
2916 	drm_modeset_lock_all(dev);
2917 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2918 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2919 
2920 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
2921 			   pll->info->id);
2922 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2923 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
2924 		seq_printf(m, " tracked hardware state:\n");
2925 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
2926 		seq_printf(m, " dpll_md: 0x%08x\n",
2927 			   pll->state.hw_state.dpll_md);
2928 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
2929 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
2930 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
2931 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
2932 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
2933 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
2934 			   pll->state.hw_state.mg_refclkin_ctl);
2935 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2936 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
2937 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
2938 			   pll->state.hw_state.mg_clktop2_hsclkctl);
2939 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
2940 			   pll->state.hw_state.mg_pll_div0);
2941 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
2942 			   pll->state.hw_state.mg_pll_div1);
2943 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
2944 			   pll->state.hw_state.mg_pll_lf);
2945 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2946 			   pll->state.hw_state.mg_pll_frac_lock);
2947 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
2948 			   pll->state.hw_state.mg_pll_ssc);
2949 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
2950 			   pll->state.hw_state.mg_pll_bias);
2951 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2952 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
2953 	}
2954 	drm_modeset_unlock_all(dev);
2955 
2956 	return 0;
2957 }
2958 
2959 static int i915_wa_registers(struct seq_file *m, void *unused)
2960 {
2961 	struct drm_i915_private *i915 = node_to_i915(m->private);
2962 	struct intel_engine_cs *engine;
2963 	enum intel_engine_id id;
2964 
2965 	for_each_engine(engine, i915, id) {
2966 		const struct i915_wa_list *wal = &engine->ctx_wa_list;
2967 		const struct i915_wa *wa;
2968 		unsigned int count;
2969 
2970 		count = wal->count;
2971 		if (!count)
2972 			continue;
2973 
2974 		seq_printf(m, "%s: Workarounds applied: %u\n",
2975 			   engine->name, count);
2976 
2977 		for (wa = wal->list; count--; wa++)
2978 			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2979 				   i915_mmio_reg_offset(wa->reg),
2980 				   wa->val, wa->mask);
2981 
2982 		seq_printf(m, "\n");
2983 	}
2984 
2985 	return 0;
2986 }
2987 
2988 static int i915_ipc_status_show(struct seq_file *m, void *data)
2989 {
2990 	struct drm_i915_private *dev_priv = m->private;
2991 
2992 	seq_printf(m, "Isochronous Priority Control: %s\n",
2993 			yesno(dev_priv->ipc_enabled));
2994 	return 0;
2995 }
2996 
2997 static int i915_ipc_status_open(struct inode *inode, struct file *file)
2998 {
2999 	struct drm_i915_private *dev_priv = inode->i_private;
3000 
3001 	if (!HAS_IPC(dev_priv))
3002 		return -ENODEV;
3003 
3004 	return single_open(file, i915_ipc_status_show, dev_priv);
3005 }
3006 
3007 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3008 				     size_t len, loff_t *offp)
3009 {
3010 	struct seq_file *m = file->private_data;
3011 	struct drm_i915_private *dev_priv = m->private;
3012 	intel_wakeref_t wakeref;
3013 	bool enable;
3014 	int ret;
3015 
3016 	ret = kstrtobool_from_user(ubuf, len, &enable);
3017 	if (ret < 0)
3018 		return ret;
3019 
3020 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3021 		if (!dev_priv->ipc_enabled && enable)
3022 			DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3023 		dev_priv->wm.distrust_bios_wm = true;
3024 		dev_priv->ipc_enabled = enable;
3025 		intel_enable_ipc(dev_priv);
3026 	}
3027 
3028 	return len;
3029 }
3030 
3031 static const struct file_operations i915_ipc_status_fops = {
3032 	.owner = THIS_MODULE,
3033 	.open = i915_ipc_status_open,
3034 	.read = seq_read,
3035 	.llseek = seq_lseek,
3036 	.release = single_release,
3037 	.write = i915_ipc_status_write
3038 };
3039 
3040 static int i915_ddb_info(struct seq_file *m, void *unused)
3041 {
3042 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3043 	struct drm_device *dev = &dev_priv->drm;
3044 	struct skl_ddb_entry *entry;
3045 	struct intel_crtc *crtc;
3046 
3047 	if (INTEL_GEN(dev_priv) < 9)
3048 		return -ENODEV;
3049 
3050 	drm_modeset_lock_all(dev);
3051 
3052 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3053 
3054 	for_each_intel_crtc(&dev_priv->drm, crtc) {
3055 		struct intel_crtc_state *crtc_state =
3056 			to_intel_crtc_state(crtc->base.state);
3057 		enum pipe pipe = crtc->pipe;
3058 		enum plane_id plane_id;
3059 
3060 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3061 
3062 		for_each_plane_id_on_crtc(crtc, plane_id) {
3063 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3064 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
3065 				   entry->start, entry->end,
3066 				   skl_ddb_entry_size(entry));
3067 		}
3068 
3069 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
3070 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3071 			   entry->end, skl_ddb_entry_size(entry));
3072 	}
3073 
3074 	drm_modeset_unlock_all(dev);
3075 
3076 	return 0;
3077 }
3078 
3079 static void drrs_status_per_crtc(struct seq_file *m,
3080 				 struct drm_device *dev,
3081 				 struct intel_crtc *intel_crtc)
3082 {
3083 	struct drm_i915_private *dev_priv = to_i915(dev);
3084 	struct i915_drrs *drrs = &dev_priv->drrs;
3085 	int vrefresh = 0;
3086 	struct drm_connector *connector;
3087 	struct drm_connector_list_iter conn_iter;
3088 
3089 	drm_connector_list_iter_begin(dev, &conn_iter);
3090 	drm_for_each_connector_iter(connector, &conn_iter) {
3091 		if (connector->state->crtc != &intel_crtc->base)
3092 			continue;
3093 
3094 		seq_printf(m, "%s:\n", connector->name);
3095 	}
3096 	drm_connector_list_iter_end(&conn_iter);
3097 
3098 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3099 		seq_puts(m, "\tVBT: DRRS_type: Static");
3100 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3101 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3102 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3103 		seq_puts(m, "\tVBT: DRRS_type: None");
3104 	else
3105 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3106 
3107 	seq_puts(m, "\n\n");
3108 
3109 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3110 		struct intel_panel *panel;
3111 
3112 		mutex_lock(&drrs->mutex);
3113 		/* DRRS Supported */
3114 		seq_puts(m, "\tDRRS Supported: Yes\n");
3115 
3116 		/* disable_drrs() will make drrs->dp NULL */
3117 		if (!drrs->dp) {
3118 			seq_puts(m, "Idleness DRRS: Disabled\n");
3119 			if (dev_priv->psr.enabled)
3120 				seq_puts(m,
3121 				"\tAs PSR is enabled, DRRS is not enabled\n");
3122 			mutex_unlock(&drrs->mutex);
3123 			return;
3124 		}
3125 
3126 		panel = &drrs->dp->attached_connector->panel;
3127 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3128 					drrs->busy_frontbuffer_bits);
3129 
3130 		seq_puts(m, "\n\t\t");
3131 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3132 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3133 			vrefresh = panel->fixed_mode->vrefresh;
3134 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3135 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3136 			vrefresh = panel->downclock_mode->vrefresh;
3137 		} else {
3138 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3139 						drrs->refresh_rate_type);
3140 			mutex_unlock(&drrs->mutex);
3141 			return;
3142 		}
3143 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3144 
3145 		seq_puts(m, "\n\t\t");
3146 		mutex_unlock(&drrs->mutex);
3147 	} else {
3148 		/* DRRS not supported. Print the VBT parameter*/
3149 		seq_puts(m, "\tDRRS Supported : No");
3150 	}
3151 	seq_puts(m, "\n");
3152 }
3153 
3154 static int i915_drrs_status(struct seq_file *m, void *unused)
3155 {
3156 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3157 	struct drm_device *dev = &dev_priv->drm;
3158 	struct intel_crtc *intel_crtc;
3159 	int active_crtc_cnt = 0;
3160 
3161 	drm_modeset_lock_all(dev);
3162 	for_each_intel_crtc(dev, intel_crtc) {
3163 		if (intel_crtc->base.state->active) {
3164 			active_crtc_cnt++;
3165 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3166 
3167 			drrs_status_per_crtc(m, dev, intel_crtc);
3168 		}
3169 	}
3170 	drm_modeset_unlock_all(dev);
3171 
3172 	if (!active_crtc_cnt)
3173 		seq_puts(m, "No active crtc found\n");
3174 
3175 	return 0;
3176 }
3177 
3178 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3179 {
3180 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3181 	struct drm_device *dev = &dev_priv->drm;
3182 	struct intel_encoder *intel_encoder;
3183 	struct intel_digital_port *intel_dig_port;
3184 	struct drm_connector *connector;
3185 	struct drm_connector_list_iter conn_iter;
3186 
3187 	drm_connector_list_iter_begin(dev, &conn_iter);
3188 	drm_for_each_connector_iter(connector, &conn_iter) {
3189 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3190 			continue;
3191 
3192 		intel_encoder = intel_attached_encoder(connector);
3193 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3194 			continue;
3195 
3196 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3197 		if (!intel_dig_port->dp.can_mst)
3198 			continue;
3199 
3200 		seq_printf(m, "MST Source Port %c\n",
3201 			   port_name(intel_dig_port->base.port));
3202 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3203 	}
3204 	drm_connector_list_iter_end(&conn_iter);
3205 
3206 	return 0;
3207 }
3208 
3209 static ssize_t i915_displayport_test_active_write(struct file *file,
3210 						  const char __user *ubuf,
3211 						  size_t len, loff_t *offp)
3212 {
3213 	char *input_buffer;
3214 	int status = 0;
3215 	struct drm_device *dev;
3216 	struct drm_connector *connector;
3217 	struct drm_connector_list_iter conn_iter;
3218 	struct intel_dp *intel_dp;
3219 	int val = 0;
3220 
3221 	dev = ((struct seq_file *)file->private_data)->private;
3222 
3223 	if (len == 0)
3224 		return 0;
3225 
3226 	input_buffer = memdup_user_nul(ubuf, len);
3227 	if (IS_ERR(input_buffer))
3228 		return PTR_ERR(input_buffer);
3229 
3230 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3231 
3232 	drm_connector_list_iter_begin(dev, &conn_iter);
3233 	drm_for_each_connector_iter(connector, &conn_iter) {
3234 		struct intel_encoder *encoder;
3235 
3236 		if (connector->connector_type !=
3237 		    DRM_MODE_CONNECTOR_DisplayPort)
3238 			continue;
3239 
3240 		encoder = to_intel_encoder(connector->encoder);
3241 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3242 			continue;
3243 
3244 		if (encoder && connector->status == connector_status_connected) {
3245 			intel_dp = enc_to_intel_dp(&encoder->base);
3246 			status = kstrtoint(input_buffer, 10, &val);
3247 			if (status < 0)
3248 				break;
3249 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3250 			/* To prevent erroneous activation of the compliance
3251 			 * testing code, only accept an actual value of 1 here
3252 			 */
3253 			if (val == 1)
3254 				intel_dp->compliance.test_active = 1;
3255 			else
3256 				intel_dp->compliance.test_active = 0;
3257 		}
3258 	}
3259 	drm_connector_list_iter_end(&conn_iter);
3260 	kfree(input_buffer);
3261 	if (status < 0)
3262 		return status;
3263 
3264 	*offp += len;
3265 	return len;
3266 }
3267 
3268 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3269 {
3270 	struct drm_i915_private *dev_priv = m->private;
3271 	struct drm_device *dev = &dev_priv->drm;
3272 	struct drm_connector *connector;
3273 	struct drm_connector_list_iter conn_iter;
3274 	struct intel_dp *intel_dp;
3275 
3276 	drm_connector_list_iter_begin(dev, &conn_iter);
3277 	drm_for_each_connector_iter(connector, &conn_iter) {
3278 		struct intel_encoder *encoder;
3279 
3280 		if (connector->connector_type !=
3281 		    DRM_MODE_CONNECTOR_DisplayPort)
3282 			continue;
3283 
3284 		encoder = to_intel_encoder(connector->encoder);
3285 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3286 			continue;
3287 
3288 		if (encoder && connector->status == connector_status_connected) {
3289 			intel_dp = enc_to_intel_dp(&encoder->base);
3290 			if (intel_dp->compliance.test_active)
3291 				seq_puts(m, "1");
3292 			else
3293 				seq_puts(m, "0");
3294 		} else
3295 			seq_puts(m, "0");
3296 	}
3297 	drm_connector_list_iter_end(&conn_iter);
3298 
3299 	return 0;
3300 }
3301 
3302 static int i915_displayport_test_active_open(struct inode *inode,
3303 					     struct file *file)
3304 {
3305 	return single_open(file, i915_displayport_test_active_show,
3306 			   inode->i_private);
3307 }
3308 
3309 static const struct file_operations i915_displayport_test_active_fops = {
3310 	.owner = THIS_MODULE,
3311 	.open = i915_displayport_test_active_open,
3312 	.read = seq_read,
3313 	.llseek = seq_lseek,
3314 	.release = single_release,
3315 	.write = i915_displayport_test_active_write
3316 };
3317 
3318 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3319 {
3320 	struct drm_i915_private *dev_priv = m->private;
3321 	struct drm_device *dev = &dev_priv->drm;
3322 	struct drm_connector *connector;
3323 	struct drm_connector_list_iter conn_iter;
3324 	struct intel_dp *intel_dp;
3325 
3326 	drm_connector_list_iter_begin(dev, &conn_iter);
3327 	drm_for_each_connector_iter(connector, &conn_iter) {
3328 		struct intel_encoder *encoder;
3329 
3330 		if (connector->connector_type !=
3331 		    DRM_MODE_CONNECTOR_DisplayPort)
3332 			continue;
3333 
3334 		encoder = to_intel_encoder(connector->encoder);
3335 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3336 			continue;
3337 
3338 		if (encoder && connector->status == connector_status_connected) {
3339 			intel_dp = enc_to_intel_dp(&encoder->base);
3340 			if (intel_dp->compliance.test_type ==
3341 			    DP_TEST_LINK_EDID_READ)
3342 				seq_printf(m, "%lx",
3343 					   intel_dp->compliance.test_data.edid);
3344 			else if (intel_dp->compliance.test_type ==
3345 				 DP_TEST_LINK_VIDEO_PATTERN) {
3346 				seq_printf(m, "hdisplay: %d\n",
3347 					   intel_dp->compliance.test_data.hdisplay);
3348 				seq_printf(m, "vdisplay: %d\n",
3349 					   intel_dp->compliance.test_data.vdisplay);
3350 				seq_printf(m, "bpc: %u\n",
3351 					   intel_dp->compliance.test_data.bpc);
3352 			}
3353 		} else
3354 			seq_puts(m, "0");
3355 	}
3356 	drm_connector_list_iter_end(&conn_iter);
3357 
3358 	return 0;
3359 }
3360 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3361 
3362 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3363 {
3364 	struct drm_i915_private *dev_priv = m->private;
3365 	struct drm_device *dev = &dev_priv->drm;
3366 	struct drm_connector *connector;
3367 	struct drm_connector_list_iter conn_iter;
3368 	struct intel_dp *intel_dp;
3369 
3370 	drm_connector_list_iter_begin(dev, &conn_iter);
3371 	drm_for_each_connector_iter(connector, &conn_iter) {
3372 		struct intel_encoder *encoder;
3373 
3374 		if (connector->connector_type !=
3375 		    DRM_MODE_CONNECTOR_DisplayPort)
3376 			continue;
3377 
3378 		encoder = to_intel_encoder(connector->encoder);
3379 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3380 			continue;
3381 
3382 		if (encoder && connector->status == connector_status_connected) {
3383 			intel_dp = enc_to_intel_dp(&encoder->base);
3384 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3385 		} else
3386 			seq_puts(m, "0");
3387 	}
3388 	drm_connector_list_iter_end(&conn_iter);
3389 
3390 	return 0;
3391 }
3392 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3393 
3394 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3395 {
3396 	struct drm_i915_private *dev_priv = m->private;
3397 	struct drm_device *dev = &dev_priv->drm;
3398 	int level;
3399 	int num_levels;
3400 
3401 	if (IS_CHERRYVIEW(dev_priv))
3402 		num_levels = 3;
3403 	else if (IS_VALLEYVIEW(dev_priv))
3404 		num_levels = 1;
3405 	else if (IS_G4X(dev_priv))
3406 		num_levels = 3;
3407 	else
3408 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3409 
3410 	drm_modeset_lock_all(dev);
3411 
3412 	for (level = 0; level < num_levels; level++) {
3413 		unsigned int latency = wm[level];
3414 
3415 		/*
3416 		 * - WM1+ latency values in 0.5us units
3417 		 * - latencies are in us on gen9/vlv/chv
3418 		 */
3419 		if (INTEL_GEN(dev_priv) >= 9 ||
3420 		    IS_VALLEYVIEW(dev_priv) ||
3421 		    IS_CHERRYVIEW(dev_priv) ||
3422 		    IS_G4X(dev_priv))
3423 			latency *= 10;
3424 		else if (level > 0)
3425 			latency *= 5;
3426 
3427 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3428 			   level, wm[level], latency / 10, latency % 10);
3429 	}
3430 
3431 	drm_modeset_unlock_all(dev);
3432 }
3433 
3434 static int pri_wm_latency_show(struct seq_file *m, void *data)
3435 {
3436 	struct drm_i915_private *dev_priv = m->private;
3437 	const u16 *latencies;
3438 
3439 	if (INTEL_GEN(dev_priv) >= 9)
3440 		latencies = dev_priv->wm.skl_latency;
3441 	else
3442 		latencies = dev_priv->wm.pri_latency;
3443 
3444 	wm_latency_show(m, latencies);
3445 
3446 	return 0;
3447 }
3448 
3449 static int spr_wm_latency_show(struct seq_file *m, void *data)
3450 {
3451 	struct drm_i915_private *dev_priv = m->private;
3452 	const u16 *latencies;
3453 
3454 	if (INTEL_GEN(dev_priv) >= 9)
3455 		latencies = dev_priv->wm.skl_latency;
3456 	else
3457 		latencies = dev_priv->wm.spr_latency;
3458 
3459 	wm_latency_show(m, latencies);
3460 
3461 	return 0;
3462 }
3463 
3464 static int cur_wm_latency_show(struct seq_file *m, void *data)
3465 {
3466 	struct drm_i915_private *dev_priv = m->private;
3467 	const u16 *latencies;
3468 
3469 	if (INTEL_GEN(dev_priv) >= 9)
3470 		latencies = dev_priv->wm.skl_latency;
3471 	else
3472 		latencies = dev_priv->wm.cur_latency;
3473 
3474 	wm_latency_show(m, latencies);
3475 
3476 	return 0;
3477 }
3478 
3479 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3480 {
3481 	struct drm_i915_private *dev_priv = inode->i_private;
3482 
3483 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3484 		return -ENODEV;
3485 
3486 	return single_open(file, pri_wm_latency_show, dev_priv);
3487 }
3488 
3489 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3490 {
3491 	struct drm_i915_private *dev_priv = inode->i_private;
3492 
3493 	if (HAS_GMCH(dev_priv))
3494 		return -ENODEV;
3495 
3496 	return single_open(file, spr_wm_latency_show, dev_priv);
3497 }
3498 
3499 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3500 {
3501 	struct drm_i915_private *dev_priv = inode->i_private;
3502 
3503 	if (HAS_GMCH(dev_priv))
3504 		return -ENODEV;
3505 
3506 	return single_open(file, cur_wm_latency_show, dev_priv);
3507 }
3508 
3509 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3510 				size_t len, loff_t *offp, u16 wm[8])
3511 {
3512 	struct seq_file *m = file->private_data;
3513 	struct drm_i915_private *dev_priv = m->private;
3514 	struct drm_device *dev = &dev_priv->drm;
3515 	u16 new[8] = { 0 };
3516 	int num_levels;
3517 	int level;
3518 	int ret;
3519 	char tmp[32];
3520 
3521 	if (IS_CHERRYVIEW(dev_priv))
3522 		num_levels = 3;
3523 	else if (IS_VALLEYVIEW(dev_priv))
3524 		num_levels = 1;
3525 	else if (IS_G4X(dev_priv))
3526 		num_levels = 3;
3527 	else
3528 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3529 
3530 	if (len >= sizeof(tmp))
3531 		return -EINVAL;
3532 
3533 	if (copy_from_user(tmp, ubuf, len))
3534 		return -EFAULT;
3535 
3536 	tmp[len] = '\0';
3537 
3538 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3539 		     &new[0], &new[1], &new[2], &new[3],
3540 		     &new[4], &new[5], &new[6], &new[7]);
3541 	if (ret != num_levels)
3542 		return -EINVAL;
3543 
3544 	drm_modeset_lock_all(dev);
3545 
3546 	for (level = 0; level < num_levels; level++)
3547 		wm[level] = new[level];
3548 
3549 	drm_modeset_unlock_all(dev);
3550 
3551 	return len;
3552 }
3553 
3554 
3555 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3556 				    size_t len, loff_t *offp)
3557 {
3558 	struct seq_file *m = file->private_data;
3559 	struct drm_i915_private *dev_priv = m->private;
3560 	u16 *latencies;
3561 
3562 	if (INTEL_GEN(dev_priv) >= 9)
3563 		latencies = dev_priv->wm.skl_latency;
3564 	else
3565 		latencies = dev_priv->wm.pri_latency;
3566 
3567 	return wm_latency_write(file, ubuf, len, offp, latencies);
3568 }
3569 
3570 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3571 				    size_t len, loff_t *offp)
3572 {
3573 	struct seq_file *m = file->private_data;
3574 	struct drm_i915_private *dev_priv = m->private;
3575 	u16 *latencies;
3576 
3577 	if (INTEL_GEN(dev_priv) >= 9)
3578 		latencies = dev_priv->wm.skl_latency;
3579 	else
3580 		latencies = dev_priv->wm.spr_latency;
3581 
3582 	return wm_latency_write(file, ubuf, len, offp, latencies);
3583 }
3584 
3585 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3586 				    size_t len, loff_t *offp)
3587 {
3588 	struct seq_file *m = file->private_data;
3589 	struct drm_i915_private *dev_priv = m->private;
3590 	u16 *latencies;
3591 
3592 	if (INTEL_GEN(dev_priv) >= 9)
3593 		latencies = dev_priv->wm.skl_latency;
3594 	else
3595 		latencies = dev_priv->wm.cur_latency;
3596 
3597 	return wm_latency_write(file, ubuf, len, offp, latencies);
3598 }
3599 
3600 static const struct file_operations i915_pri_wm_latency_fops = {
3601 	.owner = THIS_MODULE,
3602 	.open = pri_wm_latency_open,
3603 	.read = seq_read,
3604 	.llseek = seq_lseek,
3605 	.release = single_release,
3606 	.write = pri_wm_latency_write
3607 };
3608 
3609 static const struct file_operations i915_spr_wm_latency_fops = {
3610 	.owner = THIS_MODULE,
3611 	.open = spr_wm_latency_open,
3612 	.read = seq_read,
3613 	.llseek = seq_lseek,
3614 	.release = single_release,
3615 	.write = spr_wm_latency_write
3616 };
3617 
3618 static const struct file_operations i915_cur_wm_latency_fops = {
3619 	.owner = THIS_MODULE,
3620 	.open = cur_wm_latency_open,
3621 	.read = seq_read,
3622 	.llseek = seq_lseek,
3623 	.release = single_release,
3624 	.write = cur_wm_latency_write
3625 };
3626 
3627 static int
3628 i915_wedged_get(void *data, u64 *val)
3629 {
3630 	struct drm_i915_private *i915 = data;
3631 	int ret = intel_gt_terminally_wedged(&i915->gt);
3632 
3633 	switch (ret) {
3634 	case -EIO:
3635 		*val = 1;
3636 		return 0;
3637 	case 0:
3638 		*val = 0;
3639 		return 0;
3640 	default:
3641 		return ret;
3642 	}
3643 }
3644 
3645 static int
3646 i915_wedged_set(void *data, u64 val)
3647 {
3648 	struct drm_i915_private *i915 = data;
3649 
3650 	/* Flush any previous reset before applying for a new one */
3651 	wait_event(i915->gt.reset.queue,
3652 		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
3653 
3654 	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
3655 			      "Manually set wedged engine mask = %llx", val);
3656 	return 0;
3657 }
3658 
3659 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3660 			i915_wedged_get, i915_wedged_set,
3661 			"%llu\n");
3662 
3663 #define DROP_UNBOUND	BIT(0)
3664 #define DROP_BOUND	BIT(1)
3665 #define DROP_RETIRE	BIT(2)
3666 #define DROP_ACTIVE	BIT(3)
3667 #define DROP_FREED	BIT(4)
3668 #define DROP_SHRINK_ALL	BIT(5)
3669 #define DROP_IDLE	BIT(6)
3670 #define DROP_RESET_ACTIVE	BIT(7)
3671 #define DROP_RESET_SEQNO	BIT(8)
3672 #define DROP_ALL (DROP_UNBOUND	| \
3673 		  DROP_BOUND	| \
3674 		  DROP_RETIRE	| \
3675 		  DROP_ACTIVE	| \
3676 		  DROP_FREED	| \
3677 		  DROP_SHRINK_ALL |\
3678 		  DROP_IDLE	| \
3679 		  DROP_RESET_ACTIVE | \
3680 		  DROP_RESET_SEQNO)
3681 static int
3682 i915_drop_caches_get(void *data, u64 *val)
3683 {
3684 	*val = DROP_ALL;
3685 
3686 	return 0;
3687 }
3688 
3689 static int
3690 i915_drop_caches_set(void *data, u64 val)
3691 {
3692 	struct drm_i915_private *i915 = data;
3693 
3694 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3695 		  val, val & DROP_ALL);
3696 
3697 	if (val & DROP_RESET_ACTIVE &&
3698 	    wait_for(intel_engines_are_idle(&i915->gt),
3699 		     I915_IDLE_ENGINES_TIMEOUT))
3700 		intel_gt_set_wedged(&i915->gt);
3701 
3702 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
3703 	 * on ioctls on -EAGAIN. */
3704 	if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
3705 		int ret;
3706 
3707 		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
3708 		if (ret)
3709 			return ret;
3710 
3711 		/*
3712 		 * To finish the flush of the idle_worker, we must complete
3713 		 * the switch-to-kernel-context, which requires a double
3714 		 * pass through wait_for_idle: first queues the switch,
3715 		 * second waits for the switch.
3716 		 */
3717 		if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
3718 			ret = i915_gem_wait_for_idle(i915,
3719 						     I915_WAIT_INTERRUPTIBLE |
3720 						     I915_WAIT_LOCKED,
3721 						     MAX_SCHEDULE_TIMEOUT);
3722 
3723 		if (ret == 0 && val & DROP_IDLE)
3724 			ret = i915_gem_wait_for_idle(i915,
3725 						     I915_WAIT_INTERRUPTIBLE |
3726 						     I915_WAIT_LOCKED,
3727 						     MAX_SCHEDULE_TIMEOUT);
3728 
3729 		if (val & DROP_RETIRE)
3730 			i915_retire_requests(i915);
3731 
3732 		mutex_unlock(&i915->drm.struct_mutex);
3733 	}
3734 
3735 	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
3736 		intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);
3737 
3738 	fs_reclaim_acquire(GFP_KERNEL);
3739 	if (val & DROP_BOUND)
3740 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
3741 
3742 	if (val & DROP_UNBOUND)
3743 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
3744 
3745 	if (val & DROP_SHRINK_ALL)
3746 		i915_gem_shrink_all(i915);
3747 	fs_reclaim_release(GFP_KERNEL);
3748 
3749 	if (val & DROP_IDLE) {
3750 		flush_delayed_work(&i915->gem.retire_work);
3751 		flush_work(&i915->gem.idle_work);
3752 	}
3753 
3754 	if (val & DROP_FREED)
3755 		i915_gem_drain_freed_objects(i915);
3756 
3757 	return 0;
3758 }
3759 
3760 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3761 			i915_drop_caches_get, i915_drop_caches_set,
3762 			"0x%08llx\n");
3763 
3764 static int
3765 i915_cache_sharing_get(void *data, u64 *val)
3766 {
3767 	struct drm_i915_private *dev_priv = data;
3768 	intel_wakeref_t wakeref;
3769 	u32 snpcr = 0;
3770 
3771 	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3772 		return -ENODEV;
3773 
3774 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
3775 		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3776 
3777 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3778 
3779 	return 0;
3780 }
3781 
3782 static int
3783 i915_cache_sharing_set(void *data, u64 val)
3784 {
3785 	struct drm_i915_private *dev_priv = data;
3786 	intel_wakeref_t wakeref;
3787 
3788 	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3789 		return -ENODEV;
3790 
3791 	if (val > 3)
3792 		return -EINVAL;
3793 
3794 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
3795 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3796 		u32 snpcr;
3797 
3798 		/* Update the cache sharing policy here as well */
3799 		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3800 		snpcr &= ~GEN6_MBC_SNPCR_MASK;
3801 		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3802 		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3803 	}
3804 
3805 	return 0;
3806 }
3807 
3808 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3809 			i915_cache_sharing_get, i915_cache_sharing_set,
3810 			"%llu\n");
3811 
3812 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
3813 					  struct sseu_dev_info *sseu)
3814 {
3815 #define SS_MAX 2
3816 	const int ss_max = SS_MAX;
3817 	u32 sig1[SS_MAX], sig2[SS_MAX];
3818 	int ss;
3819 
3820 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
3821 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
3822 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
3823 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
3824 
3825 	for (ss = 0; ss < ss_max; ss++) {
3826 		unsigned int eu_cnt;
3827 
3828 		if (sig1[ss] & CHV_SS_PG_ENABLE)
3829 			/* skip disabled subslice */
3830 			continue;
3831 
3832 		sseu->slice_mask = BIT(0);
3833 		sseu->subslice_mask[0] |= BIT(ss);
3834 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
3835 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
3836 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
3837 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
3838 		sseu->eu_total += eu_cnt;
3839 		sseu->eu_per_subslice = max_t(unsigned int,
3840 					      sseu->eu_per_subslice, eu_cnt);
3841 	}
3842 #undef SS_MAX
3843 }
3844 
3845 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
3846 				     struct sseu_dev_info *sseu)
3847 {
3848 #define SS_MAX 6
3849 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3850 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3851 	int s, ss;
3852 
3853 	for (s = 0; s < info->sseu.max_slices; s++) {
3854 		/*
3855 		 * FIXME: Valid SS Mask respects the spec and read
3856 		 * only valid bits for those registers, excluding reserved
3857 		 * although this seems wrong because it would leave many
3858 		 * subslices without ACK.
3859 		 */
3860 		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
3861 			GEN10_PGCTL_VALID_SS_MASK(s);
3862 		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
3863 		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
3864 	}
3865 
3866 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3867 		     GEN9_PGCTL_SSA_EU19_ACK |
3868 		     GEN9_PGCTL_SSA_EU210_ACK |
3869 		     GEN9_PGCTL_SSA_EU311_ACK;
3870 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3871 		     GEN9_PGCTL_SSB_EU19_ACK |
3872 		     GEN9_PGCTL_SSB_EU210_ACK |
3873 		     GEN9_PGCTL_SSB_EU311_ACK;
3874 
3875 	for (s = 0; s < info->sseu.max_slices; s++) {
3876 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3877 			/* skip disabled slice */
3878 			continue;
3879 
3880 		sseu->slice_mask |= BIT(s);
3881 		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
3882 
3883 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3884 			unsigned int eu_cnt;
3885 
3886 			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3887 				/* skip disabled subslice */
3888 				continue;
3889 
3890 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
3891 					       eu_mask[ss % 2]);
3892 			sseu->eu_total += eu_cnt;
3893 			sseu->eu_per_subslice = max_t(unsigned int,
3894 						      sseu->eu_per_subslice,
3895 						      eu_cnt);
3896 		}
3897 	}
3898 #undef SS_MAX
3899 }
3900 
3901 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
3902 				    struct sseu_dev_info *sseu)
3903 {
3904 #define SS_MAX 3
3905 	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3906 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3907 	int s, ss;
3908 
3909 	for (s = 0; s < info->sseu.max_slices; s++) {
3910 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
3911 		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
3912 		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
3913 	}
3914 
3915 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3916 		     GEN9_PGCTL_SSA_EU19_ACK |
3917 		     GEN9_PGCTL_SSA_EU210_ACK |
3918 		     GEN9_PGCTL_SSA_EU311_ACK;
3919 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3920 		     GEN9_PGCTL_SSB_EU19_ACK |
3921 		     GEN9_PGCTL_SSB_EU210_ACK |
3922 		     GEN9_PGCTL_SSB_EU311_ACK;
3923 
3924 	for (s = 0; s < info->sseu.max_slices; s++) {
3925 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3926 			/* skip disabled slice */
3927 			continue;
3928 
3929 		sseu->slice_mask |= BIT(s);
3930 
3931 		if (IS_GEN9_BC(dev_priv))
3932 			sseu->subslice_mask[s] =
3933 				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
3934 
3935 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3936 			unsigned int eu_cnt;
3937 
3938 			if (IS_GEN9_LP(dev_priv)) {
3939 				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3940 					/* skip disabled subslice */
3941 					continue;
3942 
3943 				sseu->subslice_mask[s] |= BIT(ss);
3944 			}
3945 
3946 			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
3947 					       eu_mask[ss%2]);
3948 			sseu->eu_total += eu_cnt;
3949 			sseu->eu_per_subslice = max_t(unsigned int,
3950 						      sseu->eu_per_subslice,
3951 						      eu_cnt);
3952 		}
3953 	}
3954 #undef SS_MAX
3955 }
3956 
3957 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
3958 					 struct sseu_dev_info *sseu)
3959 {
3960 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
3961 	int s;
3962 
3963 	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
3964 
3965 	if (sseu->slice_mask) {
3966 		sseu->eu_per_subslice =
3967 			RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
3968 		for (s = 0; s < fls(sseu->slice_mask); s++) {
3969 			sseu->subslice_mask[s] =
3970 				RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
3971 		}
3972 		sseu->eu_total = sseu->eu_per_subslice *
3973 				 intel_sseu_subslice_total(sseu);
3974 
3975 		/* subtract fused off EU(s) from enabled slice(s) */
3976 		for (s = 0; s < fls(sseu->slice_mask); s++) {
3977 			u8 subslice_7eu =
3978 				RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
3979 
3980 			sseu->eu_total -= hweight8(subslice_7eu);
3981 		}
3982 	}
3983 }
3984 
3985 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
3986 				 const struct sseu_dev_info *sseu)
3987 {
3988 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3989 	const char *type = is_available_info ? "Available" : "Enabled";
3990 	int s;
3991 
3992 	seq_printf(m, "  %s Slice Mask: %04x\n", type,
3993 		   sseu->slice_mask);
3994 	seq_printf(m, "  %s Slice Total: %u\n", type,
3995 		   hweight8(sseu->slice_mask));
3996 	seq_printf(m, "  %s Subslice Total: %u\n", type,
3997 		   intel_sseu_subslice_total(sseu));
3998 	for (s = 0; s < fls(sseu->slice_mask); s++) {
3999 		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4000 			   s, intel_sseu_subslices_per_slice(sseu, s));
4001 	}
4002 	seq_printf(m, "  %s EU Total: %u\n", type,
4003 		   sseu->eu_total);
4004 	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4005 		   sseu->eu_per_subslice);
4006 
4007 	if (!is_available_info)
4008 		return;
4009 
4010 	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4011 	if (HAS_POOLED_EU(dev_priv))
4012 		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4013 
4014 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4015 		   yesno(sseu->has_slice_pg));
4016 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4017 		   yesno(sseu->has_subslice_pg));
4018 	seq_printf(m, "  Has EU Power Gating: %s\n",
4019 		   yesno(sseu->has_eu_pg));
4020 }
4021 
4022 static int i915_sseu_status(struct seq_file *m, void *unused)
4023 {
4024 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4025 	struct sseu_dev_info sseu;
4026 	intel_wakeref_t wakeref;
4027 
4028 	if (INTEL_GEN(dev_priv) < 8)
4029 		return -ENODEV;
4030 
4031 	seq_puts(m, "SSEU Device Info\n");
4032 	i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
4033 
4034 	seq_puts(m, "SSEU Device Status\n");
4035 	memset(&sseu, 0, sizeof(sseu));
4036 	sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
4037 	sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
4038 	sseu.max_eus_per_subslice =
4039 		RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
4040 
4041 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
4042 		if (IS_CHERRYVIEW(dev_priv))
4043 			cherryview_sseu_device_status(dev_priv, &sseu);
4044 		else if (IS_BROADWELL(dev_priv))
4045 			broadwell_sseu_device_status(dev_priv, &sseu);
4046 		else if (IS_GEN(dev_priv, 9))
4047 			gen9_sseu_device_status(dev_priv, &sseu);
4048 		else if (INTEL_GEN(dev_priv) >= 10)
4049 			gen10_sseu_device_status(dev_priv, &sseu);
4050 	}
4051 
4052 	i915_print_sseu_info(m, false, &sseu);
4053 
4054 	return 0;
4055 }
4056 
4057 static int i915_forcewake_open(struct inode *inode, struct file *file)
4058 {
4059 	struct drm_i915_private *i915 = inode->i_private;
4060 
4061 	if (INTEL_GEN(i915) < 6)
4062 		return 0;
4063 
4064 	file->private_data =
4065 		(void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
4066 	intel_uncore_forcewake_user_get(&i915->uncore);
4067 
4068 	return 0;
4069 }
4070 
4071 static int i915_forcewake_release(struct inode *inode, struct file *file)
4072 {
4073 	struct drm_i915_private *i915 = inode->i_private;
4074 
4075 	if (INTEL_GEN(i915) < 6)
4076 		return 0;
4077 
4078 	intel_uncore_forcewake_user_put(&i915->uncore);
4079 	intel_runtime_pm_put(&i915->runtime_pm,
4080 			     (intel_wakeref_t)(uintptr_t)file->private_data);
4081 
4082 	return 0;
4083 }
4084 
4085 static const struct file_operations i915_forcewake_fops = {
4086 	.owner = THIS_MODULE,
4087 	.open = i915_forcewake_open,
4088 	.release = i915_forcewake_release,
4089 };
4090 
4091 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4092 {
4093 	struct drm_i915_private *dev_priv = m->private;
4094 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4095 
4096 	/* Synchronize with everything first in case there's been an HPD
4097 	 * storm, but we haven't finished handling it in the kernel yet
4098 	 */
4099 	intel_synchronize_irq(dev_priv);
4100 	flush_work(&dev_priv->hotplug.dig_port_work);
4101 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
4102 
4103 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4104 	seq_printf(m, "Detected: %s\n",
4105 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4106 
4107 	return 0;
4108 }
4109 
4110 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4111 					const char __user *ubuf, size_t len,
4112 					loff_t *offp)
4113 {
4114 	struct seq_file *m = file->private_data;
4115 	struct drm_i915_private *dev_priv = m->private;
4116 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4117 	unsigned int new_threshold;
4118 	int i;
4119 	char *newline;
4120 	char tmp[16];
4121 
4122 	if (len >= sizeof(tmp))
4123 		return -EINVAL;
4124 
4125 	if (copy_from_user(tmp, ubuf, len))
4126 		return -EFAULT;
4127 
4128 	tmp[len] = '\0';
4129 
4130 	/* Strip newline, if any */
4131 	newline = strchr(tmp, '\n');
4132 	if (newline)
4133 		*newline = '\0';
4134 
4135 	if (strcmp(tmp, "reset") == 0)
4136 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4137 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4138 		return -EINVAL;
4139 
4140 	if (new_threshold > 0)
4141 		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4142 			      new_threshold);
4143 	else
4144 		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4145 
4146 	spin_lock_irq(&dev_priv->irq_lock);
4147 	hotplug->hpd_storm_threshold = new_threshold;
4148 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4149 	for_each_hpd_pin(i)
4150 		hotplug->stats[i].count = 0;
4151 	spin_unlock_irq(&dev_priv->irq_lock);
4152 
4153 	/* Re-enable hpd immediately if we were in an irq storm */
4154 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4155 
4156 	return len;
4157 }
4158 
4159 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4160 {
4161 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4162 }
4163 
4164 static const struct file_operations i915_hpd_storm_ctl_fops = {
4165 	.owner = THIS_MODULE,
4166 	.open = i915_hpd_storm_ctl_open,
4167 	.read = seq_read,
4168 	.llseek = seq_lseek,
4169 	.release = single_release,
4170 	.write = i915_hpd_storm_ctl_write
4171 };
4172 
4173 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4174 {
4175 	struct drm_i915_private *dev_priv = m->private;
4176 
4177 	seq_printf(m, "Enabled: %s\n",
4178 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4179 
4180 	return 0;
4181 }
4182 
4183 static int
4184 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4185 {
4186 	return single_open(file, i915_hpd_short_storm_ctl_show,
4187 			   inode->i_private);
4188 }
4189 
4190 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4191 					      const char __user *ubuf,
4192 					      size_t len, loff_t *offp)
4193 {
4194 	struct seq_file *m = file->private_data;
4195 	struct drm_i915_private *dev_priv = m->private;
4196 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4197 	char *newline;
4198 	char tmp[16];
4199 	int i;
4200 	bool new_state;
4201 
4202 	if (len >= sizeof(tmp))
4203 		return -EINVAL;
4204 
4205 	if (copy_from_user(tmp, ubuf, len))
4206 		return -EFAULT;
4207 
4208 	tmp[len] = '\0';
4209 
4210 	/* Strip newline, if any */
4211 	newline = strchr(tmp, '\n');
4212 	if (newline)
4213 		*newline = '\0';
4214 
4215 	/* Reset to the "default" state for this system */
4216 	if (strcmp(tmp, "reset") == 0)
4217 		new_state = !HAS_DP_MST(dev_priv);
4218 	else if (kstrtobool(tmp, &new_state) != 0)
4219 		return -EINVAL;
4220 
4221 	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4222 		      new_state ? "En" : "Dis");
4223 
4224 	spin_lock_irq(&dev_priv->irq_lock);
4225 	hotplug->hpd_short_storm_enabled = new_state;
4226 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4227 	for_each_hpd_pin(i)
4228 		hotplug->stats[i].count = 0;
4229 	spin_unlock_irq(&dev_priv->irq_lock);
4230 
4231 	/* Re-enable hpd immediately if we were in an irq storm */
4232 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4233 
4234 	return len;
4235 }
4236 
4237 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4238 	.owner = THIS_MODULE,
4239 	.open = i915_hpd_short_storm_ctl_open,
4240 	.read = seq_read,
4241 	.llseek = seq_lseek,
4242 	.release = single_release,
4243 	.write = i915_hpd_short_storm_ctl_write,
4244 };
4245 
4246 static int i915_drrs_ctl_set(void *data, u64 val)
4247 {
4248 	struct drm_i915_private *dev_priv = data;
4249 	struct drm_device *dev = &dev_priv->drm;
4250 	struct intel_crtc *crtc;
4251 
4252 	if (INTEL_GEN(dev_priv) < 7)
4253 		return -ENODEV;
4254 
4255 	for_each_intel_crtc(dev, crtc) {
4256 		struct drm_connector_list_iter conn_iter;
4257 		struct intel_crtc_state *crtc_state;
4258 		struct drm_connector *connector;
4259 		struct drm_crtc_commit *commit;
4260 		int ret;
4261 
4262 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4263 		if (ret)
4264 			return ret;
4265 
4266 		crtc_state = to_intel_crtc_state(crtc->base.state);
4267 
4268 		if (!crtc_state->base.active ||
4269 		    !crtc_state->has_drrs)
4270 			goto out;
4271 
4272 		commit = crtc_state->base.commit;
4273 		if (commit) {
4274 			ret = wait_for_completion_interruptible(&commit->hw_done);
4275 			if (ret)
4276 				goto out;
4277 		}
4278 
4279 		drm_connector_list_iter_begin(dev, &conn_iter);
4280 		drm_for_each_connector_iter(connector, &conn_iter) {
4281 			struct intel_encoder *encoder;
4282 			struct intel_dp *intel_dp;
4283 
4284 			if (!(crtc_state->base.connector_mask &
4285 			      drm_connector_mask(connector)))
4286 				continue;
4287 
4288 			encoder = intel_attached_encoder(connector);
4289 			if (encoder->type != INTEL_OUTPUT_EDP)
4290 				continue;
4291 
4292 			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4293 						val ? "en" : "dis", val);
4294 
4295 			intel_dp = enc_to_intel_dp(&encoder->base);
4296 			if (val)
4297 				intel_edp_drrs_enable(intel_dp,
4298 						      crtc_state);
4299 			else
4300 				intel_edp_drrs_disable(intel_dp,
4301 						       crtc_state);
4302 		}
4303 		drm_connector_list_iter_end(&conn_iter);
4304 
4305 out:
4306 		drm_modeset_unlock(&crtc->base.mutex);
4307 		if (ret)
4308 			return ret;
4309 	}
4310 
4311 	return 0;
4312 }
4313 
4314 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4315 
4316 static ssize_t
4317 i915_fifo_underrun_reset_write(struct file *filp,
4318 			       const char __user *ubuf,
4319 			       size_t cnt, loff_t *ppos)
4320 {
4321 	struct drm_i915_private *dev_priv = filp->private_data;
4322 	struct intel_crtc *intel_crtc;
4323 	struct drm_device *dev = &dev_priv->drm;
4324 	int ret;
4325 	bool reset;
4326 
4327 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
4328 	if (ret)
4329 		return ret;
4330 
4331 	if (!reset)
4332 		return cnt;
4333 
4334 	for_each_intel_crtc(dev, intel_crtc) {
4335 		struct drm_crtc_commit *commit;
4336 		struct intel_crtc_state *crtc_state;
4337 
4338 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4339 		if (ret)
4340 			return ret;
4341 
4342 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4343 		commit = crtc_state->base.commit;
4344 		if (commit) {
4345 			ret = wait_for_completion_interruptible(&commit->hw_done);
4346 			if (!ret)
4347 				ret = wait_for_completion_interruptible(&commit->flip_done);
4348 		}
4349 
4350 		if (!ret && crtc_state->base.active) {
4351 			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4352 				      pipe_name(intel_crtc->pipe));
4353 
4354 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4355 		}
4356 
4357 		drm_modeset_unlock(&intel_crtc->base.mutex);
4358 
4359 		if (ret)
4360 			return ret;
4361 	}
4362 
4363 	ret = intel_fbc_reset_underrun(dev_priv);
4364 	if (ret)
4365 		return ret;
4366 
4367 	return cnt;
4368 }
4369 
4370 static const struct file_operations i915_fifo_underrun_reset_ops = {
4371 	.owner = THIS_MODULE,
4372 	.open = simple_open,
4373 	.write = i915_fifo_underrun_reset_write,
4374 	.llseek = default_llseek,
4375 };
4376 
4377 static const struct drm_info_list i915_debugfs_list[] = {
4378 	{"i915_capabilities", i915_capabilities, 0},
4379 	{"i915_gem_objects", i915_gem_object_info, 0},
4380 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4381 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4382 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4383 	{"i915_guc_info", i915_guc_info, 0},
4384 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4385 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4386 	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4387 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4388 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4389 	{"i915_frequency_info", i915_frequency_info, 0},
4390 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4391 	{"i915_drpc_info", i915_drpc_info, 0},
4392 	{"i915_emon_status", i915_emon_status, 0},
4393 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4394 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4395 	{"i915_fbc_status", i915_fbc_status, 0},
4396 	{"i915_ips_status", i915_ips_status, 0},
4397 	{"i915_sr_status", i915_sr_status, 0},
4398 	{"i915_opregion", i915_opregion, 0},
4399 	{"i915_vbt", i915_vbt, 0},
4400 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4401 	{"i915_context_status", i915_context_status, 0},
4402 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4403 	{"i915_swizzle_info", i915_swizzle_info, 0},
4404 	{"i915_llc", i915_llc, 0},
4405 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4406 	{"i915_energy_uJ", i915_energy_uJ, 0},
4407 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4408 	{"i915_power_domain_info", i915_power_domain_info, 0},
4409 	{"i915_dmc_info", i915_dmc_info, 0},
4410 	{"i915_display_info", i915_display_info, 0},
4411 	{"i915_engine_info", i915_engine_info, 0},
4412 	{"i915_rcs_topology", i915_rcs_topology, 0},
4413 	{"i915_shrinker_info", i915_shrinker_info, 0},
4414 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4415 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4416 	{"i915_wa_registers", i915_wa_registers, 0},
4417 	{"i915_ddb_info", i915_ddb_info, 0},
4418 	{"i915_sseu_status", i915_sseu_status, 0},
4419 	{"i915_drrs_status", i915_drrs_status, 0},
4420 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4421 };
4422 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4423 
4424 static const struct i915_debugfs_files {
4425 	const char *name;
4426 	const struct file_operations *fops;
4427 } i915_debugfs_files[] = {
4428 	{"i915_wedged", &i915_wedged_fops},
4429 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4430 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4431 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4432 	{"i915_error_state", &i915_error_state_fops},
4433 	{"i915_gpu_info", &i915_gpu_info_fops},
4434 #endif
4435 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4436 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4437 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4438 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4439 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4440 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4441 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4442 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4443 	{"i915_guc_log_level", &i915_guc_log_level_fops},
4444 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
4445 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4446 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
4447 	{"i915_ipc_status", &i915_ipc_status_fops},
4448 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
4449 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4450 };
4451 
4452 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4453 {
4454 	struct drm_minor *minor = dev_priv->drm.primary;
4455 	int i;
4456 
4457 	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
4458 			    to_i915(minor->dev), &i915_forcewake_fops);
4459 
4460 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4461 		debugfs_create_file(i915_debugfs_files[i].name,
4462 				    S_IRUGO | S_IWUSR,
4463 				    minor->debugfs_root,
4464 				    to_i915(minor->dev),
4465 				    i915_debugfs_files[i].fops);
4466 	}
4467 
4468 	return drm_debugfs_create_files(i915_debugfs_list,
4469 					I915_DEBUGFS_ENTRIES,
4470 					minor->debugfs_root, minor);
4471 }
4472 
4473 struct dpcd_block {
4474 	/* DPCD dump start address. */
4475 	unsigned int offset;
4476 	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4477 	unsigned int end;
4478 	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4479 	size_t size;
4480 	/* Only valid for eDP. */
4481 	bool edp;
4482 };
4483 
4484 static const struct dpcd_block i915_dpcd_debug[] = {
4485 	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4486 	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4487 	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4488 	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4489 	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4490 	{ .offset = DP_SET_POWER },
4491 	{ .offset = DP_EDP_DPCD_REV },
4492 	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4493 	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4494 	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4495 };
4496 
4497 static int i915_dpcd_show(struct seq_file *m, void *data)
4498 {
4499 	struct drm_connector *connector = m->private;
4500 	struct intel_dp *intel_dp =
4501 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4502 	u8 buf[16];
4503 	ssize_t err;
4504 	int i;
4505 
4506 	if (connector->status != connector_status_connected)
4507 		return -ENODEV;
4508 
4509 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4510 		const struct dpcd_block *b = &i915_dpcd_debug[i];
4511 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4512 
4513 		if (b->edp &&
4514 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4515 			continue;
4516 
4517 		/* low tech for now */
4518 		if (WARN_ON(size > sizeof(buf)))
4519 			continue;
4520 
4521 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4522 		if (err < 0)
4523 			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4524 		else
4525 			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4526 	}
4527 
4528 	return 0;
4529 }
4530 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4531 
4532 static int i915_panel_show(struct seq_file *m, void *data)
4533 {
4534 	struct drm_connector *connector = m->private;
4535 	struct intel_dp *intel_dp =
4536 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4537 
4538 	if (connector->status != connector_status_connected)
4539 		return -ENODEV;
4540 
4541 	seq_printf(m, "Panel power up delay: %d\n",
4542 		   intel_dp->panel_power_up_delay);
4543 	seq_printf(m, "Panel power down delay: %d\n",
4544 		   intel_dp->panel_power_down_delay);
4545 	seq_printf(m, "Backlight on delay: %d\n",
4546 		   intel_dp->backlight_on_delay);
4547 	seq_printf(m, "Backlight off delay: %d\n",
4548 		   intel_dp->backlight_off_delay);
4549 
4550 	return 0;
4551 }
4552 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4553 
4554 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4555 {
4556 	struct drm_connector *connector = m->private;
4557 	struct intel_connector *intel_connector = to_intel_connector(connector);
4558 
4559 	if (connector->status != connector_status_connected)
4560 		return -ENODEV;
4561 
4562 	/* HDCP is supported by connector */
4563 	if (!intel_connector->hdcp.shim)
4564 		return -EINVAL;
4565 
4566 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
4567 		   connector->base.id);
4568 	intel_hdcp_info(m, intel_connector);
4569 
4570 	return 0;
4571 }
4572 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4573 
4574 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
4575 {
4576 	struct drm_connector *connector = m->private;
4577 	struct drm_device *dev = connector->dev;
4578 	struct drm_crtc *crtc;
4579 	struct intel_dp *intel_dp;
4580 	struct drm_modeset_acquire_ctx ctx;
4581 	struct intel_crtc_state *crtc_state = NULL;
4582 	int ret = 0;
4583 	bool try_again = false;
4584 
4585 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
4586 
4587 	do {
4588 		try_again = false;
4589 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4590 				       &ctx);
4591 		if (ret) {
4592 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
4593 				try_again = true;
4594 				continue;
4595 			}
4596 			break;
4597 		}
4598 		crtc = connector->state->crtc;
4599 		if (connector->status != connector_status_connected || !crtc) {
4600 			ret = -ENODEV;
4601 			break;
4602 		}
4603 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
4604 		if (ret == -EDEADLK) {
4605 			ret = drm_modeset_backoff(&ctx);
4606 			if (!ret) {
4607 				try_again = true;
4608 				continue;
4609 			}
4610 			break;
4611 		} else if (ret) {
4612 			break;
4613 		}
4614 		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4615 		crtc_state = to_intel_crtc_state(crtc->state);
4616 		seq_printf(m, "DSC_Enabled: %s\n",
4617 			   yesno(crtc_state->dsc_params.compression_enable));
4618 		seq_printf(m, "DSC_Sink_Support: %s\n",
4619 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
4620 		seq_printf(m, "Force_DSC_Enable: %s\n",
4621 			   yesno(intel_dp->force_dsc_en));
4622 		if (!intel_dp_is_edp(intel_dp))
4623 			seq_printf(m, "FEC_Sink_Support: %s\n",
4624 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
4625 	} while (try_again);
4626 
4627 	drm_modeset_drop_locks(&ctx);
4628 	drm_modeset_acquire_fini(&ctx);
4629 
4630 	return ret;
4631 }
4632 
4633 static ssize_t i915_dsc_fec_support_write(struct file *file,
4634 					  const char __user *ubuf,
4635 					  size_t len, loff_t *offp)
4636 {
4637 	bool dsc_enable = false;
4638 	int ret;
4639 	struct drm_connector *connector =
4640 		((struct seq_file *)file->private_data)->private;
4641 	struct intel_encoder *encoder = intel_attached_encoder(connector);
4642 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4643 
4644 	if (len == 0)
4645 		return 0;
4646 
4647 	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4648 			 len);
4649 
4650 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4651 	if (ret < 0)
4652 		return ret;
4653 
4654 	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4655 			 (dsc_enable) ? "true" : "false");
4656 	intel_dp->force_dsc_en = dsc_enable;
4657 
4658 	*offp += len;
4659 	return len;
4660 }
4661 
4662 static int i915_dsc_fec_support_open(struct inode *inode,
4663 				     struct file *file)
4664 {
4665 	return single_open(file, i915_dsc_fec_support_show,
4666 			   inode->i_private);
4667 }
4668 
4669 static const struct file_operations i915_dsc_fec_support_fops = {
4670 	.owner = THIS_MODULE,
4671 	.open = i915_dsc_fec_support_open,
4672 	.read = seq_read,
4673 	.llseek = seq_lseek,
4674 	.release = single_release,
4675 	.write = i915_dsc_fec_support_write
4676 };
4677 
4678 /**
4679  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4680  * @connector: pointer to a registered drm_connector
4681  *
4682  * Cleanup will be done by drm_connector_unregister() through a call to
4683  * drm_debugfs_connector_remove().
4684  *
4685  * Returns 0 on success, negative error codes on error.
4686  */
4687 int i915_debugfs_connector_add(struct drm_connector *connector)
4688 {
4689 	struct dentry *root = connector->debugfs_entry;
4690 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
4691 
4692 	/* The connector must have been registered beforehands. */
4693 	if (!root)
4694 		return -ENODEV;
4695 
4696 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4697 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4698 		debugfs_create_file("i915_dpcd", S_IRUGO, root,
4699 				    connector, &i915_dpcd_fops);
4700 
4701 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4702 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4703 				    connector, &i915_panel_fops);
4704 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4705 				    connector, &i915_psr_sink_status_fops);
4706 	}
4707 
4708 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4709 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4710 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4711 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4712 				    connector, &i915_hdcp_sink_capability_fops);
4713 	}
4714 
4715 	if (INTEL_GEN(dev_priv) >= 10 &&
4716 	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4717 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4718 		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4719 				    connector, &i915_dsc_fec_support_fops);
4720 
4721 	return 0;
4722 }
4723