1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28 
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34 
35 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36 {
37 	return to_i915(node->minor->dev);
38 }
39 
40 static int i915_capabilities(struct seq_file *m, void *data)
41 {
42 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
44 	struct drm_printer p = drm_seq_file_printer(m);
45 
46 	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
47 	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
48 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
49 
50 	intel_device_info_dump_flags(info, &p);
51 	intel_device_info_dump_runtime(info, &p);
52 	intel_driver_caps_print(&dev_priv->caps, &p);
53 
54 	kernel_param_lock(THIS_MODULE);
55 	i915_params_dump(&i915_modparams, &p);
56 	kernel_param_unlock(THIS_MODULE);
57 
58 	return 0;
59 }
60 
61 static char get_active_flag(struct drm_i915_gem_object *obj)
62 {
63 	return i915_gem_object_is_active(obj) ? '*' : ' ';
64 }
65 
66 static char get_pin_flag(struct drm_i915_gem_object *obj)
67 {
68 	return obj->pin_global ? 'p' : ' ';
69 }
70 
71 static char get_tiling_flag(struct drm_i915_gem_object *obj)
72 {
73 	switch (i915_gem_object_get_tiling(obj)) {
74 	default:
75 	case I915_TILING_NONE: return ' ';
76 	case I915_TILING_X: return 'X';
77 	case I915_TILING_Y: return 'Y';
78 	}
79 }
80 
81 static char get_global_flag(struct drm_i915_gem_object *obj)
82 {
83 	return obj->userfault_count ? 'g' : ' ';
84 }
85 
86 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
87 {
88 	return obj->mm.mapping ? 'M' : ' ';
89 }
90 
91 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92 {
93 	u64 size = 0;
94 	struct i915_vma *vma;
95 
96 	for_each_ggtt_vma(vma, obj) {
97 		if (drm_mm_node_allocated(&vma->node))
98 			size += vma->node.size;
99 	}
100 
101 	return size;
102 }
103 
104 static const char *
105 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106 {
107 	size_t x = 0;
108 
109 	switch (page_sizes) {
110 	case 0:
111 		return "";
112 	case I915_GTT_PAGE_SIZE_4K:
113 		return "4K";
114 	case I915_GTT_PAGE_SIZE_64K:
115 		return "64K";
116 	case I915_GTT_PAGE_SIZE_2M:
117 		return "2M";
118 	default:
119 		if (!buf)
120 			return "M";
121 
122 		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 			x += snprintf(buf + x, len - x, "2M, ");
124 		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 			x += snprintf(buf + x, len - x, "64K, ");
126 		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 			x += snprintf(buf + x, len - x, "4K, ");
128 		buf[x-2] = '\0';
129 
130 		return buf;
131 	}
132 }
133 
134 static void
135 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
136 {
137 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
138 	struct intel_engine_cs *engine;
139 	struct i915_vma *vma;
140 	unsigned int frontbuffer_bits;
141 	int pin_count = 0;
142 
143 	lockdep_assert_held(&obj->base.dev->struct_mutex);
144 
145 	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
146 		   &obj->base,
147 		   get_active_flag(obj),
148 		   get_pin_flag(obj),
149 		   get_tiling_flag(obj),
150 		   get_global_flag(obj),
151 		   get_pin_mapped_flag(obj),
152 		   obj->base.size / 1024,
153 		   obj->read_domains,
154 		   obj->write_domain,
155 		   i915_cache_level_str(dev_priv, obj->cache_level),
156 		   obj->mm.dirty ? " dirty" : "",
157 		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
158 	if (obj->base.name)
159 		seq_printf(m, " (name: %d)", obj->base.name);
160 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
161 		if (i915_vma_is_pinned(vma))
162 			pin_count++;
163 	}
164 	seq_printf(m, " (pinned x %d)", pin_count);
165 	if (obj->pin_global)
166 		seq_printf(m, " (global)");
167 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
168 		if (!drm_mm_node_allocated(&vma->node))
169 			continue;
170 
171 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
172 			   i915_vma_is_ggtt(vma) ? "g" : "pp",
173 			   vma->node.start, vma->node.size,
174 			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
175 		if (i915_vma_is_ggtt(vma)) {
176 			switch (vma->ggtt_view.type) {
177 			case I915_GGTT_VIEW_NORMAL:
178 				seq_puts(m, ", normal");
179 				break;
180 
181 			case I915_GGTT_VIEW_PARTIAL:
182 				seq_printf(m, ", partial [%08llx+%x]",
183 					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
184 					   vma->ggtt_view.partial.size << PAGE_SHIFT);
185 				break;
186 
187 			case I915_GGTT_VIEW_ROTATED:
188 				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
189 					   vma->ggtt_view.rotated.plane[0].width,
190 					   vma->ggtt_view.rotated.plane[0].height,
191 					   vma->ggtt_view.rotated.plane[0].stride,
192 					   vma->ggtt_view.rotated.plane[0].offset,
193 					   vma->ggtt_view.rotated.plane[1].width,
194 					   vma->ggtt_view.rotated.plane[1].height,
195 					   vma->ggtt_view.rotated.plane[1].stride,
196 					   vma->ggtt_view.rotated.plane[1].offset);
197 				break;
198 
199 			default:
200 				MISSING_CASE(vma->ggtt_view.type);
201 				break;
202 			}
203 		}
204 		if (vma->fence)
205 			seq_printf(m, " , fence: %d%s",
206 				   vma->fence->id,
207 				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
208 		seq_puts(m, ")");
209 	}
210 	if (obj->stolen)
211 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
212 
213 	engine = i915_gem_object_last_write_engine(obj);
214 	if (engine)
215 		seq_printf(m, " (%s)", engine->name);
216 
217 	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
218 	if (frontbuffer_bits)
219 		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
220 }
221 
222 static int obj_rank_by_stolen(const void *A, const void *B)
223 {
224 	const struct drm_i915_gem_object *a =
225 		*(const struct drm_i915_gem_object **)A;
226 	const struct drm_i915_gem_object *b =
227 		*(const struct drm_i915_gem_object **)B;
228 
229 	if (a->stolen->start < b->stolen->start)
230 		return -1;
231 	if (a->stolen->start > b->stolen->start)
232 		return 1;
233 	return 0;
234 }
235 
236 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
237 {
238 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
239 	struct drm_device *dev = &dev_priv->drm;
240 	struct drm_i915_gem_object **objects;
241 	struct drm_i915_gem_object *obj;
242 	u64 total_obj_size, total_gtt_size;
243 	unsigned long total, count, n;
244 	int ret;
245 
246 	total = READ_ONCE(dev_priv->mm.object_count);
247 	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
248 	if (!objects)
249 		return -ENOMEM;
250 
251 	ret = mutex_lock_interruptible(&dev->struct_mutex);
252 	if (ret)
253 		goto out;
254 
255 	total_obj_size = total_gtt_size = count = 0;
256 
257 	spin_lock(&dev_priv->mm.obj_lock);
258 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
259 		if (count == total)
260 			break;
261 
262 		if (obj->stolen == NULL)
263 			continue;
264 
265 		objects[count++] = obj;
266 		total_obj_size += obj->base.size;
267 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
268 
269 	}
270 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
271 		if (count == total)
272 			break;
273 
274 		if (obj->stolen == NULL)
275 			continue;
276 
277 		objects[count++] = obj;
278 		total_obj_size += obj->base.size;
279 	}
280 	spin_unlock(&dev_priv->mm.obj_lock);
281 
282 	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
283 
284 	seq_puts(m, "Stolen:\n");
285 	for (n = 0; n < count; n++) {
286 		seq_puts(m, "   ");
287 		describe_obj(m, objects[n]);
288 		seq_putc(m, '\n');
289 	}
290 	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
291 		   count, total_obj_size, total_gtt_size);
292 
293 	mutex_unlock(&dev->struct_mutex);
294 out:
295 	kvfree(objects);
296 	return ret;
297 }
298 
299 struct file_stats {
300 	struct drm_i915_file_private *file_priv;
301 	unsigned long count;
302 	u64 total, unbound;
303 	u64 global, shared;
304 	u64 active, inactive;
305 };
306 
307 static int per_file_stats(int id, void *ptr, void *data)
308 {
309 	struct drm_i915_gem_object *obj = ptr;
310 	struct file_stats *stats = data;
311 	struct i915_vma *vma;
312 
313 	lockdep_assert_held(&obj->base.dev->struct_mutex);
314 
315 	stats->count++;
316 	stats->total += obj->base.size;
317 	if (!obj->bind_count)
318 		stats->unbound += obj->base.size;
319 	if (obj->base.name || obj->base.dma_buf)
320 		stats->shared += obj->base.size;
321 
322 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
323 		if (!drm_mm_node_allocated(&vma->node))
324 			continue;
325 
326 		if (i915_vma_is_ggtt(vma)) {
327 			stats->global += vma->node.size;
328 		} else {
329 			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
330 
331 			if (ppgtt->vm.file != stats->file_priv)
332 				continue;
333 		}
334 
335 		if (i915_vma_is_active(vma))
336 			stats->active += vma->node.size;
337 		else
338 			stats->inactive += vma->node.size;
339 	}
340 
341 	return 0;
342 }
343 
344 #define print_file_stats(m, name, stats) do { \
345 	if (stats.count) \
346 		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
347 			   name, \
348 			   stats.count, \
349 			   stats.total, \
350 			   stats.active, \
351 			   stats.inactive, \
352 			   stats.global, \
353 			   stats.shared, \
354 			   stats.unbound); \
355 } while (0)
356 
357 static void print_batch_pool_stats(struct seq_file *m,
358 				   struct drm_i915_private *dev_priv)
359 {
360 	struct drm_i915_gem_object *obj;
361 	struct file_stats stats;
362 	struct intel_engine_cs *engine;
363 	enum intel_engine_id id;
364 	int j;
365 
366 	memset(&stats, 0, sizeof(stats));
367 
368 	for_each_engine(engine, dev_priv, id) {
369 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
370 			list_for_each_entry(obj,
371 					    &engine->batch_pool.cache_list[j],
372 					    batch_pool_link)
373 				per_file_stats(0, obj, &stats);
374 		}
375 	}
376 
377 	print_file_stats(m, "[k]batch pool", stats);
378 }
379 
380 static int per_file_ctx_stats(int idx, void *ptr, void *data)
381 {
382 	struct i915_gem_context *ctx = ptr;
383 	struct intel_engine_cs *engine;
384 	enum intel_engine_id id;
385 
386 	for_each_engine(engine, ctx->i915, id) {
387 		struct intel_context *ce = to_intel_context(ctx, engine);
388 
389 		if (ce->state)
390 			per_file_stats(0, ce->state->obj, data);
391 		if (ce->ring)
392 			per_file_stats(0, ce->ring->vma->obj, data);
393 	}
394 
395 	return 0;
396 }
397 
398 static void print_context_stats(struct seq_file *m,
399 				struct drm_i915_private *dev_priv)
400 {
401 	struct drm_device *dev = &dev_priv->drm;
402 	struct file_stats stats;
403 	struct drm_file *file;
404 
405 	memset(&stats, 0, sizeof(stats));
406 
407 	mutex_lock(&dev->struct_mutex);
408 	if (dev_priv->kernel_context)
409 		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
410 
411 	list_for_each_entry(file, &dev->filelist, lhead) {
412 		struct drm_i915_file_private *fpriv = file->driver_priv;
413 		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
414 	}
415 	mutex_unlock(&dev->struct_mutex);
416 
417 	print_file_stats(m, "[k]contexts", stats);
418 }
419 
420 static int i915_gem_object_info(struct seq_file *m, void *data)
421 {
422 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
423 	struct drm_device *dev = &dev_priv->drm;
424 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
425 	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
426 	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
427 	struct drm_i915_gem_object *obj;
428 	unsigned int page_sizes = 0;
429 	struct drm_file *file;
430 	char buf[80];
431 	int ret;
432 
433 	ret = mutex_lock_interruptible(&dev->struct_mutex);
434 	if (ret)
435 		return ret;
436 
437 	seq_printf(m, "%u objects, %llu bytes\n",
438 		   dev_priv->mm.object_count,
439 		   dev_priv->mm.object_memory);
440 
441 	size = count = 0;
442 	mapped_size = mapped_count = 0;
443 	purgeable_size = purgeable_count = 0;
444 	huge_size = huge_count = 0;
445 
446 	spin_lock(&dev_priv->mm.obj_lock);
447 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
448 		size += obj->base.size;
449 		++count;
450 
451 		if (obj->mm.madv == I915_MADV_DONTNEED) {
452 			purgeable_size += obj->base.size;
453 			++purgeable_count;
454 		}
455 
456 		if (obj->mm.mapping) {
457 			mapped_count++;
458 			mapped_size += obj->base.size;
459 		}
460 
461 		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
462 			huge_count++;
463 			huge_size += obj->base.size;
464 			page_sizes |= obj->mm.page_sizes.sg;
465 		}
466 	}
467 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
468 
469 	size = count = dpy_size = dpy_count = 0;
470 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
471 		size += obj->base.size;
472 		++count;
473 
474 		if (obj->pin_global) {
475 			dpy_size += obj->base.size;
476 			++dpy_count;
477 		}
478 
479 		if (obj->mm.madv == I915_MADV_DONTNEED) {
480 			purgeable_size += obj->base.size;
481 			++purgeable_count;
482 		}
483 
484 		if (obj->mm.mapping) {
485 			mapped_count++;
486 			mapped_size += obj->base.size;
487 		}
488 
489 		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
490 			huge_count++;
491 			huge_size += obj->base.size;
492 			page_sizes |= obj->mm.page_sizes.sg;
493 		}
494 	}
495 	spin_unlock(&dev_priv->mm.obj_lock);
496 
497 	seq_printf(m, "%u bound objects, %llu bytes\n",
498 		   count, size);
499 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
500 		   purgeable_count, purgeable_size);
501 	seq_printf(m, "%u mapped objects, %llu bytes\n",
502 		   mapped_count, mapped_size);
503 	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
504 		   huge_count,
505 		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
506 		   huge_size);
507 	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
508 		   dpy_count, dpy_size);
509 
510 	seq_printf(m, "%llu [%pa] gtt total\n",
511 		   ggtt->vm.total, &ggtt->mappable_end);
512 	seq_printf(m, "Supported page sizes: %s\n",
513 		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
514 					buf, sizeof(buf)));
515 
516 	seq_putc(m, '\n');
517 	print_batch_pool_stats(m, dev_priv);
518 	mutex_unlock(&dev->struct_mutex);
519 
520 	mutex_lock(&dev->filelist_mutex);
521 	print_context_stats(m, dev_priv);
522 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
523 		struct file_stats stats;
524 		struct drm_i915_file_private *file_priv = file->driver_priv;
525 		struct i915_request *request;
526 		struct task_struct *task;
527 
528 		mutex_lock(&dev->struct_mutex);
529 
530 		memset(&stats, 0, sizeof(stats));
531 		stats.file_priv = file->driver_priv;
532 		spin_lock(&file->table_lock);
533 		idr_for_each(&file->object_idr, per_file_stats, &stats);
534 		spin_unlock(&file->table_lock);
535 		/*
536 		 * Although we have a valid reference on file->pid, that does
537 		 * not guarantee that the task_struct who called get_pid() is
538 		 * still alive (e.g. get_pid(current) => fork() => exit()).
539 		 * Therefore, we need to protect this ->comm access using RCU.
540 		 */
541 		request = list_first_entry_or_null(&file_priv->mm.request_list,
542 						   struct i915_request,
543 						   client_link);
544 		rcu_read_lock();
545 		task = pid_task(request && request->gem_context->pid ?
546 				request->gem_context->pid : file->pid,
547 				PIDTYPE_PID);
548 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
549 		rcu_read_unlock();
550 
551 		mutex_unlock(&dev->struct_mutex);
552 	}
553 	mutex_unlock(&dev->filelist_mutex);
554 
555 	return 0;
556 }
557 
558 static int i915_gem_gtt_info(struct seq_file *m, void *data)
559 {
560 	struct drm_info_node *node = m->private;
561 	struct drm_i915_private *dev_priv = node_to_i915(node);
562 	struct drm_device *dev = &dev_priv->drm;
563 	struct drm_i915_gem_object **objects;
564 	struct drm_i915_gem_object *obj;
565 	u64 total_obj_size, total_gtt_size;
566 	unsigned long nobject, n;
567 	int count, ret;
568 
569 	nobject = READ_ONCE(dev_priv->mm.object_count);
570 	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571 	if (!objects)
572 		return -ENOMEM;
573 
574 	ret = mutex_lock_interruptible(&dev->struct_mutex);
575 	if (ret)
576 		return ret;
577 
578 	count = 0;
579 	spin_lock(&dev_priv->mm.obj_lock);
580 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581 		objects[count++] = obj;
582 		if (count == nobject)
583 			break;
584 	}
585 	spin_unlock(&dev_priv->mm.obj_lock);
586 
587 	total_obj_size = total_gtt_size = 0;
588 	for (n = 0;  n < count; n++) {
589 		obj = objects[n];
590 
591 		seq_puts(m, "   ");
592 		describe_obj(m, obj);
593 		seq_putc(m, '\n');
594 		total_obj_size += obj->base.size;
595 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
596 	}
597 
598 	mutex_unlock(&dev->struct_mutex);
599 
600 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
601 		   count, total_obj_size, total_gtt_size);
602 	kvfree(objects);
603 
604 	return 0;
605 }
606 
607 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
608 {
609 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
610 	struct drm_device *dev = &dev_priv->drm;
611 	struct drm_i915_gem_object *obj;
612 	struct intel_engine_cs *engine;
613 	enum intel_engine_id id;
614 	int total = 0;
615 	int ret, j;
616 
617 	ret = mutex_lock_interruptible(&dev->struct_mutex);
618 	if (ret)
619 		return ret;
620 
621 	for_each_engine(engine, dev_priv, id) {
622 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
623 			int count;
624 
625 			count = 0;
626 			list_for_each_entry(obj,
627 					    &engine->batch_pool.cache_list[j],
628 					    batch_pool_link)
629 				count++;
630 			seq_printf(m, "%s cache[%d]: %d objects\n",
631 				   engine->name, j, count);
632 
633 			list_for_each_entry(obj,
634 					    &engine->batch_pool.cache_list[j],
635 					    batch_pool_link) {
636 				seq_puts(m, "   ");
637 				describe_obj(m, obj);
638 				seq_putc(m, '\n');
639 			}
640 
641 			total += count;
642 		}
643 	}
644 
645 	seq_printf(m, "total: %d\n", total);
646 
647 	mutex_unlock(&dev->struct_mutex);
648 
649 	return 0;
650 }
651 
652 static void gen8_display_interrupt_info(struct seq_file *m)
653 {
654 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
655 	int pipe;
656 
657 	for_each_pipe(dev_priv, pipe) {
658 		enum intel_display_power_domain power_domain;
659 
660 		power_domain = POWER_DOMAIN_PIPE(pipe);
661 		if (!intel_display_power_get_if_enabled(dev_priv,
662 							power_domain)) {
663 			seq_printf(m, "Pipe %c power disabled\n",
664 				   pipe_name(pipe));
665 			continue;
666 		}
667 		seq_printf(m, "Pipe %c IMR:\t%08x\n",
668 			   pipe_name(pipe),
669 			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
670 		seq_printf(m, "Pipe %c IIR:\t%08x\n",
671 			   pipe_name(pipe),
672 			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
673 		seq_printf(m, "Pipe %c IER:\t%08x\n",
674 			   pipe_name(pipe),
675 			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
676 
677 		intel_display_power_put(dev_priv, power_domain);
678 	}
679 
680 	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
681 		   I915_READ(GEN8_DE_PORT_IMR));
682 	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
683 		   I915_READ(GEN8_DE_PORT_IIR));
684 	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
685 		   I915_READ(GEN8_DE_PORT_IER));
686 
687 	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
688 		   I915_READ(GEN8_DE_MISC_IMR));
689 	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
690 		   I915_READ(GEN8_DE_MISC_IIR));
691 	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
692 		   I915_READ(GEN8_DE_MISC_IER));
693 
694 	seq_printf(m, "PCU interrupt mask:\t%08x\n",
695 		   I915_READ(GEN8_PCU_IMR));
696 	seq_printf(m, "PCU interrupt identity:\t%08x\n",
697 		   I915_READ(GEN8_PCU_IIR));
698 	seq_printf(m, "PCU interrupt enable:\t%08x\n",
699 		   I915_READ(GEN8_PCU_IER));
700 }
701 
702 static int i915_interrupt_info(struct seq_file *m, void *data)
703 {
704 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
705 	struct intel_engine_cs *engine;
706 	enum intel_engine_id id;
707 	int i, pipe;
708 
709 	intel_runtime_pm_get(dev_priv);
710 
711 	if (IS_CHERRYVIEW(dev_priv)) {
712 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
713 			   I915_READ(GEN8_MASTER_IRQ));
714 
715 		seq_printf(m, "Display IER:\t%08x\n",
716 			   I915_READ(VLV_IER));
717 		seq_printf(m, "Display IIR:\t%08x\n",
718 			   I915_READ(VLV_IIR));
719 		seq_printf(m, "Display IIR_RW:\t%08x\n",
720 			   I915_READ(VLV_IIR_RW));
721 		seq_printf(m, "Display IMR:\t%08x\n",
722 			   I915_READ(VLV_IMR));
723 		for_each_pipe(dev_priv, pipe) {
724 			enum intel_display_power_domain power_domain;
725 
726 			power_domain = POWER_DOMAIN_PIPE(pipe);
727 			if (!intel_display_power_get_if_enabled(dev_priv,
728 								power_domain)) {
729 				seq_printf(m, "Pipe %c power disabled\n",
730 					   pipe_name(pipe));
731 				continue;
732 			}
733 
734 			seq_printf(m, "Pipe %c stat:\t%08x\n",
735 				   pipe_name(pipe),
736 				   I915_READ(PIPESTAT(pipe)));
737 
738 			intel_display_power_put(dev_priv, power_domain);
739 		}
740 
741 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
742 		seq_printf(m, "Port hotplug:\t%08x\n",
743 			   I915_READ(PORT_HOTPLUG_EN));
744 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745 			   I915_READ(VLV_DPFLIPSTAT));
746 		seq_printf(m, "DPINVGTT:\t%08x\n",
747 			   I915_READ(DPINVGTT));
748 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
749 
750 		for (i = 0; i < 4; i++) {
751 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752 				   i, I915_READ(GEN8_GT_IMR(i)));
753 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754 				   i, I915_READ(GEN8_GT_IIR(i)));
755 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756 				   i, I915_READ(GEN8_GT_IER(i)));
757 		}
758 
759 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
760 			   I915_READ(GEN8_PCU_IMR));
761 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
762 			   I915_READ(GEN8_PCU_IIR));
763 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
764 			   I915_READ(GEN8_PCU_IER));
765 	} else if (INTEL_GEN(dev_priv) >= 11) {
766 		seq_printf(m, "Master Interrupt Control:  %08x\n",
767 			   I915_READ(GEN11_GFX_MSTR_IRQ));
768 
769 		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
770 			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771 		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
772 			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773 		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
774 			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775 		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777 		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
778 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779 		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
780 			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781 
782 		seq_printf(m, "Display Interrupt Control:\t%08x\n",
783 			   I915_READ(GEN11_DISPLAY_INT_CTL));
784 
785 		gen8_display_interrupt_info(m);
786 	} else if (INTEL_GEN(dev_priv) >= 8) {
787 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
788 			   I915_READ(GEN8_MASTER_IRQ));
789 
790 		for (i = 0; i < 4; i++) {
791 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792 				   i, I915_READ(GEN8_GT_IMR(i)));
793 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794 				   i, I915_READ(GEN8_GT_IIR(i)));
795 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796 				   i, I915_READ(GEN8_GT_IER(i)));
797 		}
798 
799 		gen8_display_interrupt_info(m);
800 	} else if (IS_VALLEYVIEW(dev_priv)) {
801 		seq_printf(m, "Display IER:\t%08x\n",
802 			   I915_READ(VLV_IER));
803 		seq_printf(m, "Display IIR:\t%08x\n",
804 			   I915_READ(VLV_IIR));
805 		seq_printf(m, "Display IIR_RW:\t%08x\n",
806 			   I915_READ(VLV_IIR_RW));
807 		seq_printf(m, "Display IMR:\t%08x\n",
808 			   I915_READ(VLV_IMR));
809 		for_each_pipe(dev_priv, pipe) {
810 			enum intel_display_power_domain power_domain;
811 
812 			power_domain = POWER_DOMAIN_PIPE(pipe);
813 			if (!intel_display_power_get_if_enabled(dev_priv,
814 								power_domain)) {
815 				seq_printf(m, "Pipe %c power disabled\n",
816 					   pipe_name(pipe));
817 				continue;
818 			}
819 
820 			seq_printf(m, "Pipe %c stat:\t%08x\n",
821 				   pipe_name(pipe),
822 				   I915_READ(PIPESTAT(pipe)));
823 			intel_display_power_put(dev_priv, power_domain);
824 		}
825 
826 		seq_printf(m, "Master IER:\t%08x\n",
827 			   I915_READ(VLV_MASTER_IER));
828 
829 		seq_printf(m, "Render IER:\t%08x\n",
830 			   I915_READ(GTIER));
831 		seq_printf(m, "Render IIR:\t%08x\n",
832 			   I915_READ(GTIIR));
833 		seq_printf(m, "Render IMR:\t%08x\n",
834 			   I915_READ(GTIMR));
835 
836 		seq_printf(m, "PM IER:\t\t%08x\n",
837 			   I915_READ(GEN6_PMIER));
838 		seq_printf(m, "PM IIR:\t\t%08x\n",
839 			   I915_READ(GEN6_PMIIR));
840 		seq_printf(m, "PM IMR:\t\t%08x\n",
841 			   I915_READ(GEN6_PMIMR));
842 
843 		seq_printf(m, "Port hotplug:\t%08x\n",
844 			   I915_READ(PORT_HOTPLUG_EN));
845 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846 			   I915_READ(VLV_DPFLIPSTAT));
847 		seq_printf(m, "DPINVGTT:\t%08x\n",
848 			   I915_READ(DPINVGTT));
849 
850 	} else if (!HAS_PCH_SPLIT(dev_priv)) {
851 		seq_printf(m, "Interrupt enable:    %08x\n",
852 			   I915_READ(IER));
853 		seq_printf(m, "Interrupt identity:  %08x\n",
854 			   I915_READ(IIR));
855 		seq_printf(m, "Interrupt mask:      %08x\n",
856 			   I915_READ(IMR));
857 		for_each_pipe(dev_priv, pipe)
858 			seq_printf(m, "Pipe %c stat:         %08x\n",
859 				   pipe_name(pipe),
860 				   I915_READ(PIPESTAT(pipe)));
861 	} else {
862 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
863 			   I915_READ(DEIER));
864 		seq_printf(m, "North Display Interrupt identity:	%08x\n",
865 			   I915_READ(DEIIR));
866 		seq_printf(m, "North Display Interrupt mask:		%08x\n",
867 			   I915_READ(DEIMR));
868 		seq_printf(m, "South Display Interrupt enable:		%08x\n",
869 			   I915_READ(SDEIER));
870 		seq_printf(m, "South Display Interrupt identity:	%08x\n",
871 			   I915_READ(SDEIIR));
872 		seq_printf(m, "South Display Interrupt mask:		%08x\n",
873 			   I915_READ(SDEIMR));
874 		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
875 			   I915_READ(GTIER));
876 		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
877 			   I915_READ(GTIIR));
878 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
879 			   I915_READ(GTIMR));
880 	}
881 
882 	if (INTEL_GEN(dev_priv) >= 11) {
883 		seq_printf(m, "RCS Intr Mask:\t %08x\n",
884 			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885 		seq_printf(m, "BCS Intr Mask:\t %08x\n",
886 			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887 		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888 			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889 		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890 			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891 		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892 			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893 		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894 			   I915_READ(GEN11_GUC_SG_INTR_MASK));
895 		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897 		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899 		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900 			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901 
902 	} else if (INTEL_GEN(dev_priv) >= 6) {
903 		for_each_engine(engine, dev_priv, id) {
904 			seq_printf(m,
905 				   "Graphics Interrupt mask (%s):	%08x\n",
906 				   engine->name, I915_READ_IMR(engine));
907 		}
908 	}
909 
910 	intel_runtime_pm_put(dev_priv);
911 
912 	return 0;
913 }
914 
915 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916 {
917 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
918 	struct drm_device *dev = &dev_priv->drm;
919 	int i, ret;
920 
921 	ret = mutex_lock_interruptible(&dev->struct_mutex);
922 	if (ret)
923 		return ret;
924 
925 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
927 		struct i915_vma *vma = dev_priv->fence_regs[i].vma;
928 
929 		seq_printf(m, "Fence %d, pin count = %d, object = ",
930 			   i, dev_priv->fence_regs[i].pin_count);
931 		if (!vma)
932 			seq_puts(m, "unused");
933 		else
934 			describe_obj(m, vma->obj);
935 		seq_putc(m, '\n');
936 	}
937 
938 	mutex_unlock(&dev->struct_mutex);
939 	return 0;
940 }
941 
942 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
943 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944 			      size_t count, loff_t *pos)
945 {
946 	struct i915_gpu_state *error = file->private_data;
947 	struct drm_i915_error_state_buf str;
948 	ssize_t ret;
949 	loff_t tmp;
950 
951 	if (!error)
952 		return 0;
953 
954 	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
955 	if (ret)
956 		return ret;
957 
958 	ret = i915_error_state_to_str(&str, error);
959 	if (ret)
960 		goto out;
961 
962 	tmp = 0;
963 	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
964 	if (ret < 0)
965 		goto out;
966 
967 	*pos = str.start + ret;
968 out:
969 	i915_error_state_buf_release(&str);
970 	return ret;
971 }
972 
973 static int gpu_state_release(struct inode *inode, struct file *file)
974 {
975 	i915_gpu_state_put(file->private_data);
976 	return 0;
977 }
978 
979 static int i915_gpu_info_open(struct inode *inode, struct file *file)
980 {
981 	struct drm_i915_private *i915 = inode->i_private;
982 	struct i915_gpu_state *gpu;
983 
984 	intel_runtime_pm_get(i915);
985 	gpu = i915_capture_gpu_state(i915);
986 	intel_runtime_pm_put(i915);
987 	if (!gpu)
988 		return -ENOMEM;
989 
990 	file->private_data = gpu;
991 	return 0;
992 }
993 
994 static const struct file_operations i915_gpu_info_fops = {
995 	.owner = THIS_MODULE,
996 	.open = i915_gpu_info_open,
997 	.read = gpu_state_read,
998 	.llseek = default_llseek,
999 	.release = gpu_state_release,
1000 };
1001 
1002 static ssize_t
1003 i915_error_state_write(struct file *filp,
1004 		       const char __user *ubuf,
1005 		       size_t cnt,
1006 		       loff_t *ppos)
1007 {
1008 	struct i915_gpu_state *error = filp->private_data;
1009 
1010 	if (!error)
1011 		return 0;
1012 
1013 	DRM_DEBUG_DRIVER("Resetting error state\n");
1014 	i915_reset_error_state(error->i915);
1015 
1016 	return cnt;
1017 }
1018 
1019 static int i915_error_state_open(struct inode *inode, struct file *file)
1020 {
1021 	file->private_data = i915_first_error_state(inode->i_private);
1022 	return 0;
1023 }
1024 
1025 static const struct file_operations i915_error_state_fops = {
1026 	.owner = THIS_MODULE,
1027 	.open = i915_error_state_open,
1028 	.read = gpu_state_read,
1029 	.write = i915_error_state_write,
1030 	.llseek = default_llseek,
1031 	.release = gpu_state_release,
1032 };
1033 #endif
1034 
1035 static int
1036 i915_next_seqno_set(void *data, u64 val)
1037 {
1038 	struct drm_i915_private *dev_priv = data;
1039 	struct drm_device *dev = &dev_priv->drm;
1040 	int ret;
1041 
1042 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1043 	if (ret)
1044 		return ret;
1045 
1046 	intel_runtime_pm_get(dev_priv);
1047 	ret = i915_gem_set_global_seqno(dev, val);
1048 	intel_runtime_pm_put(dev_priv);
1049 
1050 	mutex_unlock(&dev->struct_mutex);
1051 
1052 	return ret;
1053 }
1054 
1055 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1056 			NULL, i915_next_seqno_set,
1057 			"0x%llx\n");
1058 
1059 static int i915_frequency_info(struct seq_file *m, void *unused)
1060 {
1061 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1062 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1063 	int ret = 0;
1064 
1065 	intel_runtime_pm_get(dev_priv);
1066 
1067 	if (IS_GEN5(dev_priv)) {
1068 		u16 rgvswctl = I915_READ16(MEMSWCTL);
1069 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1070 
1071 		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1072 		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1073 		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1074 			   MEMSTAT_VID_SHIFT);
1075 		seq_printf(m, "Current P-state: %d\n",
1076 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1077 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1078 		u32 rpmodectl, freq_sts;
1079 
1080 		mutex_lock(&dev_priv->pcu_lock);
1081 
1082 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1083 		seq_printf(m, "Video Turbo Mode: %s\n",
1084 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1085 		seq_printf(m, "HW control enabled: %s\n",
1086 			   yesno(rpmodectl & GEN6_RP_ENABLE));
1087 		seq_printf(m, "SW control enabled: %s\n",
1088 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1089 				  GEN6_RP_MEDIA_SW_MODE));
1090 
1091 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1092 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1093 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1094 
1095 		seq_printf(m, "actual GPU freq: %d MHz\n",
1096 			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1097 
1098 		seq_printf(m, "current GPU freq: %d MHz\n",
1099 			   intel_gpu_freq(dev_priv, rps->cur_freq));
1100 
1101 		seq_printf(m, "max GPU freq: %d MHz\n",
1102 			   intel_gpu_freq(dev_priv, rps->max_freq));
1103 
1104 		seq_printf(m, "min GPU freq: %d MHz\n",
1105 			   intel_gpu_freq(dev_priv, rps->min_freq));
1106 
1107 		seq_printf(m, "idle GPU freq: %d MHz\n",
1108 			   intel_gpu_freq(dev_priv, rps->idle_freq));
1109 
1110 		seq_printf(m,
1111 			   "efficient (RPe) frequency: %d MHz\n",
1112 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1113 		mutex_unlock(&dev_priv->pcu_lock);
1114 	} else if (INTEL_GEN(dev_priv) >= 6) {
1115 		u32 rp_state_limits;
1116 		u32 gt_perf_status;
1117 		u32 rp_state_cap;
1118 		u32 rpmodectl, rpinclimit, rpdeclimit;
1119 		u32 rpstat, cagf, reqf;
1120 		u32 rpupei, rpcurup, rpprevup;
1121 		u32 rpdownei, rpcurdown, rpprevdown;
1122 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1123 		int max_freq;
1124 
1125 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1126 		if (IS_GEN9_LP(dev_priv)) {
1127 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1128 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1129 		} else {
1130 			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1131 			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1132 		}
1133 
1134 		/* RPSTAT1 is in the GT power well */
1135 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1136 
1137 		reqf = I915_READ(GEN6_RPNSWREQ);
1138 		if (INTEL_GEN(dev_priv) >= 9)
1139 			reqf >>= 23;
1140 		else {
1141 			reqf &= ~GEN6_TURBO_DISABLE;
1142 			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1143 				reqf >>= 24;
1144 			else
1145 				reqf >>= 25;
1146 		}
1147 		reqf = intel_gpu_freq(dev_priv, reqf);
1148 
1149 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1150 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1151 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1152 
1153 		rpstat = I915_READ(GEN6_RPSTAT1);
1154 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1155 		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1156 		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1157 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1158 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1159 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
1160 		cagf = intel_gpu_freq(dev_priv,
1161 				      intel_get_cagf(dev_priv, rpstat));
1162 
1163 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1164 
1165 		if (INTEL_GEN(dev_priv) >= 11) {
1166 			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1167 			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1168 			/*
1169 			 * The equivalent to the PM ISR & IIR cannot be read
1170 			 * without affecting the current state of the system
1171 			 */
1172 			pm_isr = 0;
1173 			pm_iir = 0;
1174 		} else if (INTEL_GEN(dev_priv) >= 8) {
1175 			pm_ier = I915_READ(GEN8_GT_IER(2));
1176 			pm_imr = I915_READ(GEN8_GT_IMR(2));
1177 			pm_isr = I915_READ(GEN8_GT_ISR(2));
1178 			pm_iir = I915_READ(GEN8_GT_IIR(2));
1179 		} else {
1180 			pm_ier = I915_READ(GEN6_PMIER);
1181 			pm_imr = I915_READ(GEN6_PMIMR);
1182 			pm_isr = I915_READ(GEN6_PMISR);
1183 			pm_iir = I915_READ(GEN6_PMIIR);
1184 		}
1185 		pm_mask = I915_READ(GEN6_PMINTRMSK);
1186 
1187 		seq_printf(m, "Video Turbo Mode: %s\n",
1188 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1189 		seq_printf(m, "HW control enabled: %s\n",
1190 			   yesno(rpmodectl & GEN6_RP_ENABLE));
1191 		seq_printf(m, "SW control enabled: %s\n",
1192 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1193 				  GEN6_RP_MEDIA_SW_MODE));
1194 
1195 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1196 			   pm_ier, pm_imr, pm_mask);
1197 		if (INTEL_GEN(dev_priv) <= 10)
1198 			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1199 				   pm_isr, pm_iir);
1200 		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
1201 			   rps->pm_intrmsk_mbz);
1202 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1203 		seq_printf(m, "Render p-state ratio: %d\n",
1204 			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
1205 		seq_printf(m, "Render p-state VID: %d\n",
1206 			   gt_perf_status & 0xff);
1207 		seq_printf(m, "Render p-state limit: %d\n",
1208 			   rp_state_limits & 0xff);
1209 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1210 		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1211 		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1212 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1213 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1214 		seq_printf(m, "CAGF: %dMHz\n", cagf);
1215 		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1216 			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1217 		seq_printf(m, "RP CUR UP: %d (%dus)\n",
1218 			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1219 		seq_printf(m, "RP PREV UP: %d (%dus)\n",
1220 			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
1221 		seq_printf(m, "Up threshold: %d%%\n",
1222 			   rps->power.up_threshold);
1223 
1224 		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1225 			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1226 		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1227 			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1228 		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1229 			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
1230 		seq_printf(m, "Down threshold: %d%%\n",
1231 			   rps->power.down_threshold);
1232 
1233 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
1234 			    rp_state_cap >> 16) & 0xff;
1235 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1236 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1237 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1238 			   intel_gpu_freq(dev_priv, max_freq));
1239 
1240 		max_freq = (rp_state_cap & 0xff00) >> 8;
1241 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1242 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1243 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1244 			   intel_gpu_freq(dev_priv, max_freq));
1245 
1246 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
1247 			    rp_state_cap >> 0) & 0xff;
1248 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1249 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1250 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1251 			   intel_gpu_freq(dev_priv, max_freq));
1252 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1253 			   intel_gpu_freq(dev_priv, rps->max_freq));
1254 
1255 		seq_printf(m, "Current freq: %d MHz\n",
1256 			   intel_gpu_freq(dev_priv, rps->cur_freq));
1257 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
1258 		seq_printf(m, "Idle freq: %d MHz\n",
1259 			   intel_gpu_freq(dev_priv, rps->idle_freq));
1260 		seq_printf(m, "Min freq: %d MHz\n",
1261 			   intel_gpu_freq(dev_priv, rps->min_freq));
1262 		seq_printf(m, "Boost freq: %d MHz\n",
1263 			   intel_gpu_freq(dev_priv, rps->boost_freq));
1264 		seq_printf(m, "Max freq: %d MHz\n",
1265 			   intel_gpu_freq(dev_priv, rps->max_freq));
1266 		seq_printf(m,
1267 			   "efficient (RPe) frequency: %d MHz\n",
1268 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1269 	} else {
1270 		seq_puts(m, "no P-state info available\n");
1271 	}
1272 
1273 	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1274 	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1275 	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1276 
1277 	intel_runtime_pm_put(dev_priv);
1278 	return ret;
1279 }
1280 
1281 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1282 			       struct seq_file *m,
1283 			       struct intel_instdone *instdone)
1284 {
1285 	int slice;
1286 	int subslice;
1287 
1288 	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1289 		   instdone->instdone);
1290 
1291 	if (INTEL_GEN(dev_priv) <= 3)
1292 		return;
1293 
1294 	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1295 		   instdone->slice_common);
1296 
1297 	if (INTEL_GEN(dev_priv) <= 6)
1298 		return;
1299 
1300 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1301 		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1302 			   slice, subslice, instdone->sampler[slice][subslice]);
1303 
1304 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1305 		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1306 			   slice, subslice, instdone->row[slice][subslice]);
1307 }
1308 
1309 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1310 {
1311 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1312 	struct intel_engine_cs *engine;
1313 	u64 acthd[I915_NUM_ENGINES];
1314 	u32 seqno[I915_NUM_ENGINES];
1315 	struct intel_instdone instdone;
1316 	enum intel_engine_id id;
1317 
1318 	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1319 		seq_puts(m, "Wedged\n");
1320 	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1321 		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1322 	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
1323 		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
1324 	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
1325 		seq_puts(m, "Waiter holding struct mutex\n");
1326 	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
1327 		seq_puts(m, "struct_mutex blocked for reset\n");
1328 
1329 	if (!i915_modparams.enable_hangcheck) {
1330 		seq_puts(m, "Hangcheck disabled\n");
1331 		return 0;
1332 	}
1333 
1334 	intel_runtime_pm_get(dev_priv);
1335 
1336 	for_each_engine(engine, dev_priv, id) {
1337 		acthd[id] = intel_engine_get_active_head(engine);
1338 		seqno[id] = intel_engine_get_seqno(engine);
1339 	}
1340 
1341 	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
1342 
1343 	intel_runtime_pm_put(dev_priv);
1344 
1345 	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1346 		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1347 			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1348 					    jiffies));
1349 	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1350 		seq_puts(m, "Hangcheck active, work pending\n");
1351 	else
1352 		seq_puts(m, "Hangcheck inactive\n");
1353 
1354 	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1355 
1356 	for_each_engine(engine, dev_priv, id) {
1357 		struct intel_breadcrumbs *b = &engine->breadcrumbs;
1358 		struct rb_node *rb;
1359 
1360 		seq_printf(m, "%s:\n", engine->name);
1361 		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
1362 			   engine->hangcheck.seqno, seqno[id],
1363 			   intel_engine_last_submit(engine));
1364 		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
1365 			   yesno(intel_engine_has_waiter(engine)),
1366 			   yesno(test_bit(engine->id,
1367 					  &dev_priv->gpu_error.missed_irq_rings)),
1368 			   yesno(engine->hangcheck.stalled),
1369 			   yesno(engine->hangcheck.wedged));
1370 
1371 		spin_lock_irq(&b->rb_lock);
1372 		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
1373 			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
1374 
1375 			seq_printf(m, "\t%s [%d] waiting for %x\n",
1376 				   w->tsk->comm, w->tsk->pid, w->seqno);
1377 		}
1378 		spin_unlock_irq(&b->rb_lock);
1379 
1380 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1381 			   (long long)engine->hangcheck.acthd,
1382 			   (long long)acthd[id]);
1383 		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1384 			   hangcheck_action_to_str(engine->hangcheck.action),
1385 			   engine->hangcheck.action,
1386 			   jiffies_to_msecs(jiffies -
1387 					    engine->hangcheck.action_timestamp));
1388 
1389 		if (engine->id == RCS) {
1390 			seq_puts(m, "\tinstdone read =\n");
1391 
1392 			i915_instdone_info(dev_priv, m, &instdone);
1393 
1394 			seq_puts(m, "\tinstdone accu =\n");
1395 
1396 			i915_instdone_info(dev_priv, m,
1397 					   &engine->hangcheck.instdone);
1398 		}
1399 	}
1400 
1401 	return 0;
1402 }
1403 
1404 static int i915_reset_info(struct seq_file *m, void *unused)
1405 {
1406 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1407 	struct i915_gpu_error *error = &dev_priv->gpu_error;
1408 	struct intel_engine_cs *engine;
1409 	enum intel_engine_id id;
1410 
1411 	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1412 
1413 	for_each_engine(engine, dev_priv, id) {
1414 		seq_printf(m, "%s = %u\n", engine->name,
1415 			   i915_reset_engine_count(error, engine));
1416 	}
1417 
1418 	return 0;
1419 }
1420 
1421 static int ironlake_drpc_info(struct seq_file *m)
1422 {
1423 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1424 	u32 rgvmodectl, rstdbyctl;
1425 	u16 crstandvid;
1426 
1427 	rgvmodectl = I915_READ(MEMMODECTL);
1428 	rstdbyctl = I915_READ(RSTDBYCTL);
1429 	crstandvid = I915_READ16(CRSTANDVID);
1430 
1431 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1432 	seq_printf(m, "Boost freq: %d\n",
1433 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1434 		   MEMMODE_BOOST_FREQ_SHIFT);
1435 	seq_printf(m, "HW control enabled: %s\n",
1436 		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1437 	seq_printf(m, "SW control enabled: %s\n",
1438 		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1439 	seq_printf(m, "Gated voltage change: %s\n",
1440 		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1441 	seq_printf(m, "Starting frequency: P%d\n",
1442 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1443 	seq_printf(m, "Max P-state: P%d\n",
1444 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1445 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1446 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1447 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1448 	seq_printf(m, "Render standby enabled: %s\n",
1449 		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1450 	seq_puts(m, "Current RS state: ");
1451 	switch (rstdbyctl & RSX_STATUS_MASK) {
1452 	case RSX_STATUS_ON:
1453 		seq_puts(m, "on\n");
1454 		break;
1455 	case RSX_STATUS_RC1:
1456 		seq_puts(m, "RC1\n");
1457 		break;
1458 	case RSX_STATUS_RC1E:
1459 		seq_puts(m, "RC1E\n");
1460 		break;
1461 	case RSX_STATUS_RS1:
1462 		seq_puts(m, "RS1\n");
1463 		break;
1464 	case RSX_STATUS_RS2:
1465 		seq_puts(m, "RS2 (RC6)\n");
1466 		break;
1467 	case RSX_STATUS_RS3:
1468 		seq_puts(m, "RC3 (RC6+)\n");
1469 		break;
1470 	default:
1471 		seq_puts(m, "unknown\n");
1472 		break;
1473 	}
1474 
1475 	return 0;
1476 }
1477 
1478 static int i915_forcewake_domains(struct seq_file *m, void *data)
1479 {
1480 	struct drm_i915_private *i915 = node_to_i915(m->private);
1481 	struct intel_uncore_forcewake_domain *fw_domain;
1482 	unsigned int tmp;
1483 
1484 	seq_printf(m, "user.bypass_count = %u\n",
1485 		   i915->uncore.user_forcewake.count);
1486 
1487 	for_each_fw_domain(fw_domain, i915, tmp)
1488 		seq_printf(m, "%s.wake_count = %u\n",
1489 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
1490 			   READ_ONCE(fw_domain->wake_count));
1491 
1492 	return 0;
1493 }
1494 
1495 static void print_rc6_res(struct seq_file *m,
1496 			  const char *title,
1497 			  const i915_reg_t reg)
1498 {
1499 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1500 
1501 	seq_printf(m, "%s %u (%llu us)\n",
1502 		   title, I915_READ(reg),
1503 		   intel_rc6_residency_us(dev_priv, reg));
1504 }
1505 
1506 static int vlv_drpc_info(struct seq_file *m)
1507 {
1508 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1509 	u32 rcctl1, pw_status;
1510 
1511 	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1512 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1513 
1514 	seq_printf(m, "RC6 Enabled: %s\n",
1515 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1516 					GEN6_RC_CTL_EI_MODE(1))));
1517 	seq_printf(m, "Render Power Well: %s\n",
1518 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1519 	seq_printf(m, "Media Power Well: %s\n",
1520 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1521 
1522 	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1523 	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1524 
1525 	return i915_forcewake_domains(m, NULL);
1526 }
1527 
1528 static int gen6_drpc_info(struct seq_file *m)
1529 {
1530 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1531 	u32 gt_core_status, rcctl1, rc6vids = 0;
1532 	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1533 
1534 	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1535 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1536 
1537 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1538 	if (INTEL_GEN(dev_priv) >= 9) {
1539 		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1540 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1541 	}
1542 
1543 	if (INTEL_GEN(dev_priv) <= 7) {
1544 		mutex_lock(&dev_priv->pcu_lock);
1545 		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1546 				       &rc6vids);
1547 		mutex_unlock(&dev_priv->pcu_lock);
1548 	}
1549 
1550 	seq_printf(m, "RC1e Enabled: %s\n",
1551 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1552 	seq_printf(m, "RC6 Enabled: %s\n",
1553 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1554 	if (INTEL_GEN(dev_priv) >= 9) {
1555 		seq_printf(m, "Render Well Gating Enabled: %s\n",
1556 			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1557 		seq_printf(m, "Media Well Gating Enabled: %s\n",
1558 			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1559 	}
1560 	seq_printf(m, "Deep RC6 Enabled: %s\n",
1561 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1562 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1563 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1564 	seq_puts(m, "Current RC state: ");
1565 	switch (gt_core_status & GEN6_RCn_MASK) {
1566 	case GEN6_RC0:
1567 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1568 			seq_puts(m, "Core Power Down\n");
1569 		else
1570 			seq_puts(m, "on\n");
1571 		break;
1572 	case GEN6_RC3:
1573 		seq_puts(m, "RC3\n");
1574 		break;
1575 	case GEN6_RC6:
1576 		seq_puts(m, "RC6\n");
1577 		break;
1578 	case GEN6_RC7:
1579 		seq_puts(m, "RC7\n");
1580 		break;
1581 	default:
1582 		seq_puts(m, "Unknown\n");
1583 		break;
1584 	}
1585 
1586 	seq_printf(m, "Core Power Down: %s\n",
1587 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1588 	if (INTEL_GEN(dev_priv) >= 9) {
1589 		seq_printf(m, "Render Power Well: %s\n",
1590 			(gen9_powergate_status &
1591 			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1592 		seq_printf(m, "Media Power Well: %s\n",
1593 			(gen9_powergate_status &
1594 			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1595 	}
1596 
1597 	/* Not exactly sure what this is */
1598 	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1599 		      GEN6_GT_GFX_RC6_LOCKED);
1600 	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1601 	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1602 	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1603 
1604 	if (INTEL_GEN(dev_priv) <= 7) {
1605 		seq_printf(m, "RC6   voltage: %dmV\n",
1606 			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1607 		seq_printf(m, "RC6+  voltage: %dmV\n",
1608 			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1609 		seq_printf(m, "RC6++ voltage: %dmV\n",
1610 			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1611 	}
1612 
1613 	return i915_forcewake_domains(m, NULL);
1614 }
1615 
1616 static int i915_drpc_info(struct seq_file *m, void *unused)
1617 {
1618 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1619 	int err;
1620 
1621 	intel_runtime_pm_get(dev_priv);
1622 
1623 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1624 		err = vlv_drpc_info(m);
1625 	else if (INTEL_GEN(dev_priv) >= 6)
1626 		err = gen6_drpc_info(m);
1627 	else
1628 		err = ironlake_drpc_info(m);
1629 
1630 	intel_runtime_pm_put(dev_priv);
1631 
1632 	return err;
1633 }
1634 
1635 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1636 {
1637 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1638 
1639 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640 		   dev_priv->fb_tracking.busy_bits);
1641 
1642 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643 		   dev_priv->fb_tracking.flip_bits);
1644 
1645 	return 0;
1646 }
1647 
1648 static int i915_fbc_status(struct seq_file *m, void *unused)
1649 {
1650 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1651 	struct intel_fbc *fbc = &dev_priv->fbc;
1652 
1653 	if (!HAS_FBC(dev_priv))
1654 		return -ENODEV;
1655 
1656 	intel_runtime_pm_get(dev_priv);
1657 	mutex_lock(&fbc->lock);
1658 
1659 	if (intel_fbc_is_active(dev_priv))
1660 		seq_puts(m, "FBC enabled\n");
1661 	else
1662 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1663 
1664 	if (intel_fbc_is_active(dev_priv)) {
1665 		u32 mask;
1666 
1667 		if (INTEL_GEN(dev_priv) >= 8)
1668 			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1669 		else if (INTEL_GEN(dev_priv) >= 7)
1670 			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1671 		else if (INTEL_GEN(dev_priv) >= 5)
1672 			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1673 		else if (IS_G4X(dev_priv))
1674 			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1675 		else
1676 			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1677 							FBC_STAT_COMPRESSED);
1678 
1679 		seq_printf(m, "Compressing: %s\n", yesno(mask));
1680 	}
1681 
1682 	mutex_unlock(&fbc->lock);
1683 	intel_runtime_pm_put(dev_priv);
1684 
1685 	return 0;
1686 }
1687 
1688 static int i915_fbc_false_color_get(void *data, u64 *val)
1689 {
1690 	struct drm_i915_private *dev_priv = data;
1691 
1692 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1693 		return -ENODEV;
1694 
1695 	*val = dev_priv->fbc.false_color;
1696 
1697 	return 0;
1698 }
1699 
1700 static int i915_fbc_false_color_set(void *data, u64 val)
1701 {
1702 	struct drm_i915_private *dev_priv = data;
1703 	u32 reg;
1704 
1705 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1706 		return -ENODEV;
1707 
1708 	mutex_lock(&dev_priv->fbc.lock);
1709 
1710 	reg = I915_READ(ILK_DPFC_CONTROL);
1711 	dev_priv->fbc.false_color = val;
1712 
1713 	I915_WRITE(ILK_DPFC_CONTROL, val ?
1714 		   (reg | FBC_CTL_FALSE_COLOR) :
1715 		   (reg & ~FBC_CTL_FALSE_COLOR));
1716 
1717 	mutex_unlock(&dev_priv->fbc.lock);
1718 	return 0;
1719 }
1720 
1721 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1722 			i915_fbc_false_color_get, i915_fbc_false_color_set,
1723 			"%llu\n");
1724 
1725 static int i915_ips_status(struct seq_file *m, void *unused)
1726 {
1727 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1728 
1729 	if (!HAS_IPS(dev_priv))
1730 		return -ENODEV;
1731 
1732 	intel_runtime_pm_get(dev_priv);
1733 
1734 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1735 		   yesno(i915_modparams.enable_ips));
1736 
1737 	if (INTEL_GEN(dev_priv) >= 8) {
1738 		seq_puts(m, "Currently: unknown\n");
1739 	} else {
1740 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1741 			seq_puts(m, "Currently: enabled\n");
1742 		else
1743 			seq_puts(m, "Currently: disabled\n");
1744 	}
1745 
1746 	intel_runtime_pm_put(dev_priv);
1747 
1748 	return 0;
1749 }
1750 
1751 static int i915_sr_status(struct seq_file *m, void *unused)
1752 {
1753 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1754 	bool sr_enabled = false;
1755 
1756 	intel_runtime_pm_get(dev_priv);
1757 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1758 
1759 	if (INTEL_GEN(dev_priv) >= 9)
1760 		/* no global SR status; inspect per-plane WM */;
1761 	else if (HAS_PCH_SPLIT(dev_priv))
1762 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1763 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1764 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1765 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1766 	else if (IS_I915GM(dev_priv))
1767 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1768 	else if (IS_PINEVIEW(dev_priv))
1769 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1770 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1771 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1772 
1773 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1774 	intel_runtime_pm_put(dev_priv);
1775 
1776 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1777 
1778 	return 0;
1779 }
1780 
1781 static int i915_emon_status(struct seq_file *m, void *unused)
1782 {
1783 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1784 	struct drm_device *dev = &dev_priv->drm;
1785 	unsigned long temp, chipset, gfx;
1786 	int ret;
1787 
1788 	if (!IS_GEN5(dev_priv))
1789 		return -ENODEV;
1790 
1791 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1792 	if (ret)
1793 		return ret;
1794 
1795 	temp = i915_mch_val(dev_priv);
1796 	chipset = i915_chipset_val(dev_priv);
1797 	gfx = i915_gfx_val(dev_priv);
1798 	mutex_unlock(&dev->struct_mutex);
1799 
1800 	seq_printf(m, "GMCH temp: %ld\n", temp);
1801 	seq_printf(m, "Chipset power: %ld\n", chipset);
1802 	seq_printf(m, "GFX power: %ld\n", gfx);
1803 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1804 
1805 	return 0;
1806 }
1807 
1808 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1809 {
1810 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1811 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1812 	unsigned int max_gpu_freq, min_gpu_freq;
1813 	int gpu_freq, ia_freq;
1814 	int ret;
1815 
1816 	if (!HAS_LLC(dev_priv))
1817 		return -ENODEV;
1818 
1819 	intel_runtime_pm_get(dev_priv);
1820 
1821 	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
1822 	if (ret)
1823 		goto out;
1824 
1825 	min_gpu_freq = rps->min_freq;
1826 	max_gpu_freq = rps->max_freq;
1827 	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1828 		/* Convert GT frequency to 50 HZ units */
1829 		min_gpu_freq /= GEN9_FREQ_SCALER;
1830 		max_gpu_freq /= GEN9_FREQ_SCALER;
1831 	}
1832 
1833 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1834 
1835 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1836 		ia_freq = gpu_freq;
1837 		sandybridge_pcode_read(dev_priv,
1838 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1839 				       &ia_freq);
1840 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1841 			   intel_gpu_freq(dev_priv, (gpu_freq *
1842 						     (IS_GEN9_BC(dev_priv) ||
1843 						      INTEL_GEN(dev_priv) >= 10 ?
1844 						      GEN9_FREQ_SCALER : 1))),
1845 			   ((ia_freq >> 0) & 0xff) * 100,
1846 			   ((ia_freq >> 8) & 0xff) * 100);
1847 	}
1848 
1849 	mutex_unlock(&dev_priv->pcu_lock);
1850 
1851 out:
1852 	intel_runtime_pm_put(dev_priv);
1853 	return ret;
1854 }
1855 
1856 static int i915_opregion(struct seq_file *m, void *unused)
1857 {
1858 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1859 	struct drm_device *dev = &dev_priv->drm;
1860 	struct intel_opregion *opregion = &dev_priv->opregion;
1861 	int ret;
1862 
1863 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1864 	if (ret)
1865 		goto out;
1866 
1867 	if (opregion->header)
1868 		seq_write(m, opregion->header, OPREGION_SIZE);
1869 
1870 	mutex_unlock(&dev->struct_mutex);
1871 
1872 out:
1873 	return 0;
1874 }
1875 
1876 static int i915_vbt(struct seq_file *m, void *unused)
1877 {
1878 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1879 
1880 	if (opregion->vbt)
1881 		seq_write(m, opregion->vbt, opregion->vbt_size);
1882 
1883 	return 0;
1884 }
1885 
1886 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1887 {
1888 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1889 	struct drm_device *dev = &dev_priv->drm;
1890 	struct intel_framebuffer *fbdev_fb = NULL;
1891 	struct drm_framebuffer *drm_fb;
1892 	int ret;
1893 
1894 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1895 	if (ret)
1896 		return ret;
1897 
1898 #ifdef CONFIG_DRM_FBDEV_EMULATION
1899 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1900 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1901 
1902 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1903 			   fbdev_fb->base.width,
1904 			   fbdev_fb->base.height,
1905 			   fbdev_fb->base.format->depth,
1906 			   fbdev_fb->base.format->cpp[0] * 8,
1907 			   fbdev_fb->base.modifier,
1908 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1909 		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1910 		seq_putc(m, '\n');
1911 	}
1912 #endif
1913 
1914 	mutex_lock(&dev->mode_config.fb_lock);
1915 	drm_for_each_fb(drm_fb, dev) {
1916 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1917 		if (fb == fbdev_fb)
1918 			continue;
1919 
1920 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1921 			   fb->base.width,
1922 			   fb->base.height,
1923 			   fb->base.format->depth,
1924 			   fb->base.format->cpp[0] * 8,
1925 			   fb->base.modifier,
1926 			   drm_framebuffer_read_refcount(&fb->base));
1927 		describe_obj(m, intel_fb_obj(&fb->base));
1928 		seq_putc(m, '\n');
1929 	}
1930 	mutex_unlock(&dev->mode_config.fb_lock);
1931 	mutex_unlock(&dev->struct_mutex);
1932 
1933 	return 0;
1934 }
1935 
1936 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1937 {
1938 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1939 		   ring->space, ring->head, ring->tail, ring->emit);
1940 }
1941 
1942 static int i915_context_status(struct seq_file *m, void *unused)
1943 {
1944 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1945 	struct drm_device *dev = &dev_priv->drm;
1946 	struct intel_engine_cs *engine;
1947 	struct i915_gem_context *ctx;
1948 	enum intel_engine_id id;
1949 	int ret;
1950 
1951 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1952 	if (ret)
1953 		return ret;
1954 
1955 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1956 		seq_puts(m, "HW context ");
1957 		if (!list_empty(&ctx->hw_id_link))
1958 			seq_printf(m, "%x [pin %u]", ctx->hw_id,
1959 				   atomic_read(&ctx->hw_id_pin_count));
1960 		if (ctx->pid) {
1961 			struct task_struct *task;
1962 
1963 			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1964 			if (task) {
1965 				seq_printf(m, "(%s [%d]) ",
1966 					   task->comm, task->pid);
1967 				put_task_struct(task);
1968 			}
1969 		} else if (IS_ERR(ctx->file_priv)) {
1970 			seq_puts(m, "(deleted) ");
1971 		} else {
1972 			seq_puts(m, "(kernel) ");
1973 		}
1974 
1975 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1976 		seq_putc(m, '\n');
1977 
1978 		for_each_engine(engine, dev_priv, id) {
1979 			struct intel_context *ce =
1980 				to_intel_context(ctx, engine);
1981 
1982 			seq_printf(m, "%s: ", engine->name);
1983 			if (ce->state)
1984 				describe_obj(m, ce->state->obj);
1985 			if (ce->ring)
1986 				describe_ctx_ring(m, ce->ring);
1987 			seq_putc(m, '\n');
1988 		}
1989 
1990 		seq_putc(m, '\n');
1991 	}
1992 
1993 	mutex_unlock(&dev->struct_mutex);
1994 
1995 	return 0;
1996 }
1997 
1998 static const char *swizzle_string(unsigned swizzle)
1999 {
2000 	switch (swizzle) {
2001 	case I915_BIT_6_SWIZZLE_NONE:
2002 		return "none";
2003 	case I915_BIT_6_SWIZZLE_9:
2004 		return "bit9";
2005 	case I915_BIT_6_SWIZZLE_9_10:
2006 		return "bit9/bit10";
2007 	case I915_BIT_6_SWIZZLE_9_11:
2008 		return "bit9/bit11";
2009 	case I915_BIT_6_SWIZZLE_9_10_11:
2010 		return "bit9/bit10/bit11";
2011 	case I915_BIT_6_SWIZZLE_9_17:
2012 		return "bit9/bit17";
2013 	case I915_BIT_6_SWIZZLE_9_10_17:
2014 		return "bit9/bit10/bit17";
2015 	case I915_BIT_6_SWIZZLE_UNKNOWN:
2016 		return "unknown";
2017 	}
2018 
2019 	return "bug";
2020 }
2021 
2022 static int i915_swizzle_info(struct seq_file *m, void *data)
2023 {
2024 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2025 
2026 	intel_runtime_pm_get(dev_priv);
2027 
2028 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2029 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2030 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2031 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2032 
2033 	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
2034 		seq_printf(m, "DDC = 0x%08x\n",
2035 			   I915_READ(DCC));
2036 		seq_printf(m, "DDC2 = 0x%08x\n",
2037 			   I915_READ(DCC2));
2038 		seq_printf(m, "C0DRB3 = 0x%04x\n",
2039 			   I915_READ16(C0DRB3));
2040 		seq_printf(m, "C1DRB3 = 0x%04x\n",
2041 			   I915_READ16(C1DRB3));
2042 	} else if (INTEL_GEN(dev_priv) >= 6) {
2043 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2044 			   I915_READ(MAD_DIMM_C0));
2045 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2046 			   I915_READ(MAD_DIMM_C1));
2047 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2048 			   I915_READ(MAD_DIMM_C2));
2049 		seq_printf(m, "TILECTL = 0x%08x\n",
2050 			   I915_READ(TILECTL));
2051 		if (INTEL_GEN(dev_priv) >= 8)
2052 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2053 				   I915_READ(GAMTARBMODE));
2054 		else
2055 			seq_printf(m, "ARB_MODE = 0x%08x\n",
2056 				   I915_READ(ARB_MODE));
2057 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2058 			   I915_READ(DISP_ARB_CTL));
2059 	}
2060 
2061 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2062 		seq_puts(m, "L-shaped memory detected\n");
2063 
2064 	intel_runtime_pm_put(dev_priv);
2065 
2066 	return 0;
2067 }
2068 
2069 static int per_file_ctx(int id, void *ptr, void *data)
2070 {
2071 	struct i915_gem_context *ctx = ptr;
2072 	struct seq_file *m = data;
2073 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2074 
2075 	if (!ppgtt) {
2076 		seq_printf(m, "  no ppgtt for context %d\n",
2077 			   ctx->user_handle);
2078 		return 0;
2079 	}
2080 
2081 	if (i915_gem_context_is_default(ctx))
2082 		seq_puts(m, "  default context:\n");
2083 	else
2084 		seq_printf(m, "  context %d:\n", ctx->user_handle);
2085 	ppgtt->debug_dump(ppgtt, m);
2086 
2087 	return 0;
2088 }
2089 
2090 static void gen8_ppgtt_info(struct seq_file *m,
2091 			    struct drm_i915_private *dev_priv)
2092 {
2093 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2094 	struct intel_engine_cs *engine;
2095 	enum intel_engine_id id;
2096 	int i;
2097 
2098 	if (!ppgtt)
2099 		return;
2100 
2101 	for_each_engine(engine, dev_priv, id) {
2102 		seq_printf(m, "%s\n", engine->name);
2103 		for (i = 0; i < 4; i++) {
2104 			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2105 			pdp <<= 32;
2106 			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2107 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2108 		}
2109 	}
2110 }
2111 
2112 static void gen6_ppgtt_info(struct seq_file *m,
2113 			    struct drm_i915_private *dev_priv)
2114 {
2115 	struct intel_engine_cs *engine;
2116 	enum intel_engine_id id;
2117 
2118 	if (IS_GEN6(dev_priv))
2119 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2120 
2121 	for_each_engine(engine, dev_priv, id) {
2122 		seq_printf(m, "%s\n", engine->name);
2123 		if (IS_GEN7(dev_priv))
2124 			seq_printf(m, "GFX_MODE: 0x%08x\n",
2125 				   I915_READ(RING_MODE_GEN7(engine)));
2126 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2127 			   I915_READ(RING_PP_DIR_BASE(engine)));
2128 		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2129 			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
2130 		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2131 			   I915_READ(RING_PP_DIR_DCLV(engine)));
2132 	}
2133 	if (dev_priv->mm.aliasing_ppgtt) {
2134 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2135 
2136 		seq_puts(m, "aliasing PPGTT:\n");
2137 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2138 
2139 		ppgtt->debug_dump(ppgtt, m);
2140 	}
2141 
2142 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2143 }
2144 
2145 static int i915_ppgtt_info(struct seq_file *m, void *data)
2146 {
2147 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2148 	struct drm_device *dev = &dev_priv->drm;
2149 	struct drm_file *file;
2150 	int ret;
2151 
2152 	mutex_lock(&dev->filelist_mutex);
2153 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2154 	if (ret)
2155 		goto out_unlock;
2156 
2157 	intel_runtime_pm_get(dev_priv);
2158 
2159 	if (INTEL_GEN(dev_priv) >= 8)
2160 		gen8_ppgtt_info(m, dev_priv);
2161 	else if (INTEL_GEN(dev_priv) >= 6)
2162 		gen6_ppgtt_info(m, dev_priv);
2163 
2164 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2165 		struct drm_i915_file_private *file_priv = file->driver_priv;
2166 		struct task_struct *task;
2167 
2168 		task = get_pid_task(file->pid, PIDTYPE_PID);
2169 		if (!task) {
2170 			ret = -ESRCH;
2171 			goto out_rpm;
2172 		}
2173 		seq_printf(m, "\nproc: %s\n", task->comm);
2174 		put_task_struct(task);
2175 		idr_for_each(&file_priv->context_idr, per_file_ctx,
2176 			     (void *)(unsigned long)m);
2177 	}
2178 
2179 out_rpm:
2180 	intel_runtime_pm_put(dev_priv);
2181 	mutex_unlock(&dev->struct_mutex);
2182 out_unlock:
2183 	mutex_unlock(&dev->filelist_mutex);
2184 	return ret;
2185 }
2186 
2187 static int count_irq_waiters(struct drm_i915_private *i915)
2188 {
2189 	struct intel_engine_cs *engine;
2190 	enum intel_engine_id id;
2191 	int count = 0;
2192 
2193 	for_each_engine(engine, i915, id)
2194 		count += intel_engine_has_waiter(engine);
2195 
2196 	return count;
2197 }
2198 
2199 static const char *rps_power_to_str(unsigned int power)
2200 {
2201 	static const char * const strings[] = {
2202 		[LOW_POWER] = "low power",
2203 		[BETWEEN] = "mixed",
2204 		[HIGH_POWER] = "high power",
2205 	};
2206 
2207 	if (power >= ARRAY_SIZE(strings) || !strings[power])
2208 		return "unknown";
2209 
2210 	return strings[power];
2211 }
2212 
2213 static int i915_rps_boost_info(struct seq_file *m, void *data)
2214 {
2215 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2216 	struct drm_device *dev = &dev_priv->drm;
2217 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
2218 	u32 act_freq = rps->cur_freq;
2219 	struct drm_file *file;
2220 
2221 	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
2222 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2223 			mutex_lock(&dev_priv->pcu_lock);
2224 			act_freq = vlv_punit_read(dev_priv,
2225 						  PUNIT_REG_GPU_FREQ_STS);
2226 			act_freq = (act_freq >> 8) & 0xff;
2227 			mutex_unlock(&dev_priv->pcu_lock);
2228 		} else {
2229 			act_freq = intel_get_cagf(dev_priv,
2230 						  I915_READ(GEN6_RPSTAT1));
2231 		}
2232 		intel_runtime_pm_put(dev_priv);
2233 	}
2234 
2235 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2236 	seq_printf(m, "GPU busy? %s [%d requests]\n",
2237 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2238 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2239 	seq_printf(m, "Boosts outstanding? %d\n",
2240 		   atomic_read(&rps->num_waiters));
2241 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
2242 	seq_printf(m, "Frequency requested %d, actual %d\n",
2243 		   intel_gpu_freq(dev_priv, rps->cur_freq),
2244 		   intel_gpu_freq(dev_priv, act_freq));
2245 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2246 		   intel_gpu_freq(dev_priv, rps->min_freq),
2247 		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2248 		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2249 		   intel_gpu_freq(dev_priv, rps->max_freq));
2250 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
2251 		   intel_gpu_freq(dev_priv, rps->idle_freq),
2252 		   intel_gpu_freq(dev_priv, rps->efficient_freq),
2253 		   intel_gpu_freq(dev_priv, rps->boost_freq));
2254 
2255 	mutex_lock(&dev->filelist_mutex);
2256 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2257 		struct drm_i915_file_private *file_priv = file->driver_priv;
2258 		struct task_struct *task;
2259 
2260 		rcu_read_lock();
2261 		task = pid_task(file->pid, PIDTYPE_PID);
2262 		seq_printf(m, "%s [%d]: %d boosts\n",
2263 			   task ? task->comm : "<unknown>",
2264 			   task ? task->pid : -1,
2265 			   atomic_read(&file_priv->rps_client.boosts));
2266 		rcu_read_unlock();
2267 	}
2268 	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2269 		   atomic_read(&rps->boosts));
2270 	mutex_unlock(&dev->filelist_mutex);
2271 
2272 	if (INTEL_GEN(dev_priv) >= 6 &&
2273 	    rps->enabled &&
2274 	    dev_priv->gt.active_requests) {
2275 		u32 rpup, rpupei;
2276 		u32 rpdown, rpdownei;
2277 
2278 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2279 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2280 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2281 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2282 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2283 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2284 
2285 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2286 			   rps_power_to_str(rps->power.mode));
2287 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
2288 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
2289 			   rps->power.up_threshold);
2290 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
2291 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2292 			   rps->power.down_threshold);
2293 	} else {
2294 		seq_puts(m, "\nRPS Autotuning inactive\n");
2295 	}
2296 
2297 	return 0;
2298 }
2299 
2300 static int i915_llc(struct seq_file *m, void *data)
2301 {
2302 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2303 	const bool edram = INTEL_GEN(dev_priv) > 8;
2304 
2305 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2306 	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2307 		   intel_uncore_edram_size(dev_priv)/1024/1024);
2308 
2309 	return 0;
2310 }
2311 
2312 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2313 {
2314 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2315 	struct drm_printer p;
2316 
2317 	if (!HAS_HUC(dev_priv))
2318 		return -ENODEV;
2319 
2320 	p = drm_seq_file_printer(m);
2321 	intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2322 
2323 	intel_runtime_pm_get(dev_priv);
2324 	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2325 	intel_runtime_pm_put(dev_priv);
2326 
2327 	return 0;
2328 }
2329 
2330 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2331 {
2332 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2333 	struct drm_printer p;
2334 	u32 tmp, i;
2335 
2336 	if (!HAS_GUC(dev_priv))
2337 		return -ENODEV;
2338 
2339 	p = drm_seq_file_printer(m);
2340 	intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2341 
2342 	intel_runtime_pm_get(dev_priv);
2343 
2344 	tmp = I915_READ(GUC_STATUS);
2345 
2346 	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2347 	seq_printf(m, "\tBootrom status = 0x%x\n",
2348 		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2349 	seq_printf(m, "\tuKernel status = 0x%x\n",
2350 		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2351 	seq_printf(m, "\tMIA Core status = 0x%x\n",
2352 		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2353 	seq_puts(m, "\nScratch registers:\n");
2354 	for (i = 0; i < 16; i++)
2355 		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2356 
2357 	intel_runtime_pm_put(dev_priv);
2358 
2359 	return 0;
2360 }
2361 
2362 static const char *
2363 stringify_guc_log_type(enum guc_log_buffer_type type)
2364 {
2365 	switch (type) {
2366 	case GUC_ISR_LOG_BUFFER:
2367 		return "ISR";
2368 	case GUC_DPC_LOG_BUFFER:
2369 		return "DPC";
2370 	case GUC_CRASH_DUMP_LOG_BUFFER:
2371 		return "CRASH";
2372 	default:
2373 		MISSING_CASE(type);
2374 	}
2375 
2376 	return "";
2377 }
2378 
2379 static void i915_guc_log_info(struct seq_file *m,
2380 			      struct drm_i915_private *dev_priv)
2381 {
2382 	struct intel_guc_log *log = &dev_priv->guc.log;
2383 	enum guc_log_buffer_type type;
2384 
2385 	if (!intel_guc_log_relay_enabled(log)) {
2386 		seq_puts(m, "GuC log relay disabled\n");
2387 		return;
2388 	}
2389 
2390 	seq_puts(m, "GuC logging stats:\n");
2391 
2392 	seq_printf(m, "\tRelay full count: %u\n",
2393 		   log->relay.full_count);
2394 
2395 	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2396 		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2397 			   stringify_guc_log_type(type),
2398 			   log->stats[type].flush,
2399 			   log->stats[type].sampled_overflow);
2400 	}
2401 }
2402 
2403 static void i915_guc_client_info(struct seq_file *m,
2404 				 struct drm_i915_private *dev_priv,
2405 				 struct intel_guc_client *client)
2406 {
2407 	struct intel_engine_cs *engine;
2408 	enum intel_engine_id id;
2409 	uint64_t tot = 0;
2410 
2411 	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2412 		client->priority, client->stage_id, client->proc_desc_offset);
2413 	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2414 		client->doorbell_id, client->doorbell_offset);
2415 
2416 	for_each_engine(engine, dev_priv, id) {
2417 		u64 submissions = client->submissions[id];
2418 		tot += submissions;
2419 		seq_printf(m, "\tSubmissions: %llu %s\n",
2420 				submissions, engine->name);
2421 	}
2422 	seq_printf(m, "\tTotal: %llu\n", tot);
2423 }
2424 
2425 static int i915_guc_info(struct seq_file *m, void *data)
2426 {
2427 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2428 	const struct intel_guc *guc = &dev_priv->guc;
2429 
2430 	if (!USES_GUC(dev_priv))
2431 		return -ENODEV;
2432 
2433 	i915_guc_log_info(m, dev_priv);
2434 
2435 	if (!USES_GUC_SUBMISSION(dev_priv))
2436 		return 0;
2437 
2438 	GEM_BUG_ON(!guc->execbuf_client);
2439 
2440 	seq_printf(m, "\nDoorbell map:\n");
2441 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2442 	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2443 
2444 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2445 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2446 	if (guc->preempt_client) {
2447 		seq_printf(m, "\nGuC preempt client @ %p:\n",
2448 			   guc->preempt_client);
2449 		i915_guc_client_info(m, dev_priv, guc->preempt_client);
2450 	}
2451 
2452 	/* Add more as required ... */
2453 
2454 	return 0;
2455 }
2456 
2457 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2458 {
2459 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2460 	const struct intel_guc *guc = &dev_priv->guc;
2461 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2462 	struct intel_guc_client *client = guc->execbuf_client;
2463 	unsigned int tmp;
2464 	int index;
2465 
2466 	if (!USES_GUC_SUBMISSION(dev_priv))
2467 		return -ENODEV;
2468 
2469 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2470 		struct intel_engine_cs *engine;
2471 
2472 		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2473 			continue;
2474 
2475 		seq_printf(m, "GuC stage descriptor %u:\n", index);
2476 		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2477 		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2478 		seq_printf(m, "\tPriority: %d\n", desc->priority);
2479 		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2480 		seq_printf(m, "\tEngines used: 0x%x\n",
2481 			   desc->engines_used);
2482 		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2483 			   desc->db_trigger_phy,
2484 			   desc->db_trigger_cpu,
2485 			   desc->db_trigger_uk);
2486 		seq_printf(m, "\tProcess descriptor: 0x%x\n",
2487 			   desc->process_desc);
2488 		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2489 			   desc->wq_addr, desc->wq_size);
2490 		seq_putc(m, '\n');
2491 
2492 		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2493 			u32 guc_engine_id = engine->guc_id;
2494 			struct guc_execlist_context *lrc =
2495 						&desc->lrc[guc_engine_id];
2496 
2497 			seq_printf(m, "\t%s LRC:\n", engine->name);
2498 			seq_printf(m, "\t\tContext desc: 0x%x\n",
2499 				   lrc->context_desc);
2500 			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2501 			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2502 			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2503 			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2504 			seq_putc(m, '\n');
2505 		}
2506 	}
2507 
2508 	return 0;
2509 }
2510 
2511 static int i915_guc_log_dump(struct seq_file *m, void *data)
2512 {
2513 	struct drm_info_node *node = m->private;
2514 	struct drm_i915_private *dev_priv = node_to_i915(node);
2515 	bool dump_load_err = !!node->info_ent->data;
2516 	struct drm_i915_gem_object *obj = NULL;
2517 	u32 *log;
2518 	int i = 0;
2519 
2520 	if (!HAS_GUC(dev_priv))
2521 		return -ENODEV;
2522 
2523 	if (dump_load_err)
2524 		obj = dev_priv->guc.load_err_log;
2525 	else if (dev_priv->guc.log.vma)
2526 		obj = dev_priv->guc.log.vma->obj;
2527 
2528 	if (!obj)
2529 		return 0;
2530 
2531 	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2532 	if (IS_ERR(log)) {
2533 		DRM_DEBUG("Failed to pin object\n");
2534 		seq_puts(m, "(log data unaccessible)\n");
2535 		return PTR_ERR(log);
2536 	}
2537 
2538 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2539 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2540 			   *(log + i), *(log + i + 1),
2541 			   *(log + i + 2), *(log + i + 3));
2542 
2543 	seq_putc(m, '\n');
2544 
2545 	i915_gem_object_unpin_map(obj);
2546 
2547 	return 0;
2548 }
2549 
2550 static int i915_guc_log_level_get(void *data, u64 *val)
2551 {
2552 	struct drm_i915_private *dev_priv = data;
2553 
2554 	if (!USES_GUC(dev_priv))
2555 		return -ENODEV;
2556 
2557 	*val = intel_guc_log_get_level(&dev_priv->guc.log);
2558 
2559 	return 0;
2560 }
2561 
2562 static int i915_guc_log_level_set(void *data, u64 val)
2563 {
2564 	struct drm_i915_private *dev_priv = data;
2565 
2566 	if (!USES_GUC(dev_priv))
2567 		return -ENODEV;
2568 
2569 	return intel_guc_log_set_level(&dev_priv->guc.log, val);
2570 }
2571 
2572 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2573 			i915_guc_log_level_get, i915_guc_log_level_set,
2574 			"%lld\n");
2575 
2576 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2577 {
2578 	struct drm_i915_private *dev_priv = inode->i_private;
2579 
2580 	if (!USES_GUC(dev_priv))
2581 		return -ENODEV;
2582 
2583 	file->private_data = &dev_priv->guc.log;
2584 
2585 	return intel_guc_log_relay_open(&dev_priv->guc.log);
2586 }
2587 
2588 static ssize_t
2589 i915_guc_log_relay_write(struct file *filp,
2590 			 const char __user *ubuf,
2591 			 size_t cnt,
2592 			 loff_t *ppos)
2593 {
2594 	struct intel_guc_log *log = filp->private_data;
2595 
2596 	intel_guc_log_relay_flush(log);
2597 
2598 	return cnt;
2599 }
2600 
2601 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2602 {
2603 	struct drm_i915_private *dev_priv = inode->i_private;
2604 
2605 	intel_guc_log_relay_close(&dev_priv->guc.log);
2606 
2607 	return 0;
2608 }
2609 
2610 static const struct file_operations i915_guc_log_relay_fops = {
2611 	.owner = THIS_MODULE,
2612 	.open = i915_guc_log_relay_open,
2613 	.write = i915_guc_log_relay_write,
2614 	.release = i915_guc_log_relay_release,
2615 };
2616 
2617 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2618 {
2619 	u8 val;
2620 	static const char * const sink_status[] = {
2621 		"inactive",
2622 		"transition to active, capture and display",
2623 		"active, display from RFB",
2624 		"active, capture and display on sink device timings",
2625 		"transition to inactive, capture and display, timing re-sync",
2626 		"reserved",
2627 		"reserved",
2628 		"sink internal error",
2629 	};
2630 	struct drm_connector *connector = m->private;
2631 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2632 	struct intel_dp *intel_dp =
2633 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2634 	int ret;
2635 
2636 	if (!CAN_PSR(dev_priv)) {
2637 		seq_puts(m, "PSR Unsupported\n");
2638 		return -ENODEV;
2639 	}
2640 
2641 	if (connector->status != connector_status_connected)
2642 		return -ENODEV;
2643 
2644 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2645 
2646 	if (ret == 1) {
2647 		const char *str = "unknown";
2648 
2649 		val &= DP_PSR_SINK_STATE_MASK;
2650 		if (val < ARRAY_SIZE(sink_status))
2651 			str = sink_status[val];
2652 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2653 	} else {
2654 		return ret;
2655 	}
2656 
2657 	return 0;
2658 }
2659 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2660 
2661 static void
2662 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2663 {
2664 	u32 val, psr_status;
2665 
2666 	if (dev_priv->psr.psr2_enabled) {
2667 		static const char * const live_status[] = {
2668 			"IDLE",
2669 			"CAPTURE",
2670 			"CAPTURE_FS",
2671 			"SLEEP",
2672 			"BUFON_FW",
2673 			"ML_UP",
2674 			"SU_STANDBY",
2675 			"FAST_SLEEP",
2676 			"DEEP_SLEEP",
2677 			"BUF_ON",
2678 			"TG_ON"
2679 		};
2680 		psr_status = I915_READ(EDP_PSR2_STATUS);
2681 		val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2682 			EDP_PSR2_STATUS_STATE_SHIFT;
2683 		if (val < ARRAY_SIZE(live_status)) {
2684 			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2685 				   psr_status, live_status[val]);
2686 			return;
2687 		}
2688 	} else {
2689 		static const char * const live_status[] = {
2690 			"IDLE",
2691 			"SRDONACK",
2692 			"SRDENT",
2693 			"BUFOFF",
2694 			"BUFON",
2695 			"AUXACK",
2696 			"SRDOFFACK",
2697 			"SRDENT_ON",
2698 		};
2699 		psr_status = I915_READ(EDP_PSR_STATUS);
2700 		val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2701 			EDP_PSR_STATUS_STATE_SHIFT;
2702 		if (val < ARRAY_SIZE(live_status)) {
2703 			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2704 				   psr_status, live_status[val]);
2705 			return;
2706 		}
2707 	}
2708 
2709 	seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
2710 }
2711 
2712 static int i915_edp_psr_status(struct seq_file *m, void *data)
2713 {
2714 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2715 	u32 psrperf = 0;
2716 	bool enabled = false;
2717 	bool sink_support;
2718 
2719 	if (!HAS_PSR(dev_priv))
2720 		return -ENODEV;
2721 
2722 	sink_support = dev_priv->psr.sink_support;
2723 	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
2724 	if (!sink_support)
2725 		return 0;
2726 
2727 	intel_runtime_pm_get(dev_priv);
2728 
2729 	mutex_lock(&dev_priv->psr.lock);
2730 	seq_printf(m, "PSR mode: %s\n",
2731 		   dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
2732 	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
2733 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2734 		   dev_priv->psr.busy_frontbuffer_bits);
2735 
2736 	if (dev_priv->psr.psr2_enabled)
2737 		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2738 	else
2739 		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2740 
2741 	seq_printf(m, "Main link in standby mode: %s\n",
2742 		   yesno(dev_priv->psr.link_standby));
2743 
2744 	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
2745 
2746 	/*
2747 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2748 	 */
2749 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2750 		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2751 			EDP_PSR_PERF_CNT_MASK;
2752 
2753 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2754 	}
2755 
2756 	psr_source_status(dev_priv, m);
2757 	mutex_unlock(&dev_priv->psr.lock);
2758 
2759 	if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
2760 		seq_printf(m, "Last attempted entry at: %lld\n",
2761 			   dev_priv->psr.last_entry_attempt);
2762 		seq_printf(m, "Last exit at: %lld\n",
2763 			   dev_priv->psr.last_exit);
2764 	}
2765 
2766 	intel_runtime_pm_put(dev_priv);
2767 	return 0;
2768 }
2769 
2770 static int
2771 i915_edp_psr_debug_set(void *data, u64 val)
2772 {
2773 	struct drm_i915_private *dev_priv = data;
2774 	struct drm_modeset_acquire_ctx ctx;
2775 	int ret;
2776 
2777 	if (!CAN_PSR(dev_priv))
2778 		return -ENODEV;
2779 
2780 	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2781 
2782 	intel_runtime_pm_get(dev_priv);
2783 
2784 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2785 
2786 retry:
2787 	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
2788 	if (ret == -EDEADLK) {
2789 		ret = drm_modeset_backoff(&ctx);
2790 		if (!ret)
2791 			goto retry;
2792 	}
2793 
2794 	drm_modeset_drop_locks(&ctx);
2795 	drm_modeset_acquire_fini(&ctx);
2796 
2797 	intel_runtime_pm_put(dev_priv);
2798 
2799 	return ret;
2800 }
2801 
2802 static int
2803 i915_edp_psr_debug_get(void *data, u64 *val)
2804 {
2805 	struct drm_i915_private *dev_priv = data;
2806 
2807 	if (!CAN_PSR(dev_priv))
2808 		return -ENODEV;
2809 
2810 	*val = READ_ONCE(dev_priv->psr.debug);
2811 	return 0;
2812 }
2813 
2814 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2815 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2816 			"%llu\n");
2817 
2818 static int i915_energy_uJ(struct seq_file *m, void *data)
2819 {
2820 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2821 	unsigned long long power;
2822 	u32 units;
2823 
2824 	if (INTEL_GEN(dev_priv) < 6)
2825 		return -ENODEV;
2826 
2827 	intel_runtime_pm_get(dev_priv);
2828 
2829 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
2830 		intel_runtime_pm_put(dev_priv);
2831 		return -ENODEV;
2832 	}
2833 
2834 	units = (power & 0x1f00) >> 8;
2835 	power = I915_READ(MCH_SECP_NRG_STTS);
2836 	power = (1000000 * power) >> units; /* convert to uJ */
2837 
2838 	intel_runtime_pm_put(dev_priv);
2839 
2840 	seq_printf(m, "%llu", power);
2841 
2842 	return 0;
2843 }
2844 
2845 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2846 {
2847 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2848 	struct pci_dev *pdev = dev_priv->drm.pdev;
2849 
2850 	if (!HAS_RUNTIME_PM(dev_priv))
2851 		seq_puts(m, "Runtime power management not supported\n");
2852 
2853 	seq_printf(m, "GPU idle: %s (epoch %u)\n",
2854 		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2855 	seq_printf(m, "IRQs disabled: %s\n",
2856 		   yesno(!intel_irqs_enabled(dev_priv)));
2857 #ifdef CONFIG_PM
2858 	seq_printf(m, "Usage count: %d\n",
2859 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2860 #else
2861 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2862 #endif
2863 	seq_printf(m, "PCI device power state: %s [%d]\n",
2864 		   pci_power_name(pdev->current_state),
2865 		   pdev->current_state);
2866 
2867 	return 0;
2868 }
2869 
2870 static int i915_power_domain_info(struct seq_file *m, void *unused)
2871 {
2872 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2873 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2874 	int i;
2875 
2876 	mutex_lock(&power_domains->lock);
2877 
2878 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2879 	for (i = 0; i < power_domains->power_well_count; i++) {
2880 		struct i915_power_well *power_well;
2881 		enum intel_display_power_domain power_domain;
2882 
2883 		power_well = &power_domains->power_wells[i];
2884 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
2885 			   power_well->count);
2886 
2887 		for_each_power_domain(power_domain, power_well->desc->domains)
2888 			seq_printf(m, "  %-23s %d\n",
2889 				 intel_display_power_domain_str(power_domain),
2890 				 power_domains->domain_use_count[power_domain]);
2891 	}
2892 
2893 	mutex_unlock(&power_domains->lock);
2894 
2895 	return 0;
2896 }
2897 
2898 static int i915_dmc_info(struct seq_file *m, void *unused)
2899 {
2900 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2901 	struct intel_csr *csr;
2902 
2903 	if (!HAS_CSR(dev_priv))
2904 		return -ENODEV;
2905 
2906 	csr = &dev_priv->csr;
2907 
2908 	intel_runtime_pm_get(dev_priv);
2909 
2910 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2911 	seq_printf(m, "path: %s\n", csr->fw_path);
2912 
2913 	if (!csr->dmc_payload)
2914 		goto out;
2915 
2916 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2917 		   CSR_VERSION_MINOR(csr->version));
2918 
2919 	if (IS_BROXTON(dev_priv)) {
2920 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2921 			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
2922 	} else if (IS_GEN(dev_priv, 9, 11)) {
2923 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2924 			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
2925 		seq_printf(m, "DC5 -> DC6 count: %d\n",
2926 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2927 	}
2928 
2929 out:
2930 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2931 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2932 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2933 
2934 	intel_runtime_pm_put(dev_priv);
2935 
2936 	return 0;
2937 }
2938 
2939 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2940 				 struct drm_display_mode *mode)
2941 {
2942 	int i;
2943 
2944 	for (i = 0; i < tabs; i++)
2945 		seq_putc(m, '\t');
2946 
2947 	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2948 		   mode->base.id, mode->name,
2949 		   mode->vrefresh, mode->clock,
2950 		   mode->hdisplay, mode->hsync_start,
2951 		   mode->hsync_end, mode->htotal,
2952 		   mode->vdisplay, mode->vsync_start,
2953 		   mode->vsync_end, mode->vtotal,
2954 		   mode->type, mode->flags);
2955 }
2956 
2957 static void intel_encoder_info(struct seq_file *m,
2958 			       struct intel_crtc *intel_crtc,
2959 			       struct intel_encoder *intel_encoder)
2960 {
2961 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2962 	struct drm_device *dev = &dev_priv->drm;
2963 	struct drm_crtc *crtc = &intel_crtc->base;
2964 	struct intel_connector *intel_connector;
2965 	struct drm_encoder *encoder;
2966 
2967 	encoder = &intel_encoder->base;
2968 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2969 		   encoder->base.id, encoder->name);
2970 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2971 		struct drm_connector *connector = &intel_connector->base;
2972 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2973 			   connector->base.id,
2974 			   connector->name,
2975 			   drm_get_connector_status_name(connector->status));
2976 		if (connector->status == connector_status_connected) {
2977 			struct drm_display_mode *mode = &crtc->mode;
2978 			seq_printf(m, ", mode:\n");
2979 			intel_seq_print_mode(m, 2, mode);
2980 		} else {
2981 			seq_putc(m, '\n');
2982 		}
2983 	}
2984 }
2985 
2986 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2987 {
2988 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2989 	struct drm_device *dev = &dev_priv->drm;
2990 	struct drm_crtc *crtc = &intel_crtc->base;
2991 	struct intel_encoder *intel_encoder;
2992 	struct drm_plane_state *plane_state = crtc->primary->state;
2993 	struct drm_framebuffer *fb = plane_state->fb;
2994 
2995 	if (fb)
2996 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2997 			   fb->base.id, plane_state->src_x >> 16,
2998 			   plane_state->src_y >> 16, fb->width, fb->height);
2999 	else
3000 		seq_puts(m, "\tprimary plane disabled\n");
3001 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3002 		intel_encoder_info(m, intel_crtc, intel_encoder);
3003 }
3004 
3005 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3006 {
3007 	struct drm_display_mode *mode = panel->fixed_mode;
3008 
3009 	seq_printf(m, "\tfixed mode:\n");
3010 	intel_seq_print_mode(m, 2, mode);
3011 }
3012 
3013 static void intel_dp_info(struct seq_file *m,
3014 			  struct intel_connector *intel_connector)
3015 {
3016 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3017 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3018 
3019 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
3020 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
3021 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
3022 		intel_panel_info(m, &intel_connector->panel);
3023 
3024 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
3025 				&intel_dp->aux);
3026 }
3027 
3028 static void intel_dp_mst_info(struct seq_file *m,
3029 			  struct intel_connector *intel_connector)
3030 {
3031 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3032 	struct intel_dp_mst_encoder *intel_mst =
3033 		enc_to_mst(&intel_encoder->base);
3034 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
3035 	struct intel_dp *intel_dp = &intel_dig_port->dp;
3036 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3037 					intel_connector->port);
3038 
3039 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3040 }
3041 
3042 static void intel_hdmi_info(struct seq_file *m,
3043 			    struct intel_connector *intel_connector)
3044 {
3045 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3046 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3047 
3048 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
3049 }
3050 
3051 static void intel_lvds_info(struct seq_file *m,
3052 			    struct intel_connector *intel_connector)
3053 {
3054 	intel_panel_info(m, &intel_connector->panel);
3055 }
3056 
3057 static void intel_connector_info(struct seq_file *m,
3058 				 struct drm_connector *connector)
3059 {
3060 	struct intel_connector *intel_connector = to_intel_connector(connector);
3061 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3062 	struct drm_display_mode *mode;
3063 
3064 	seq_printf(m, "connector %d: type %s, status: %s\n",
3065 		   connector->base.id, connector->name,
3066 		   drm_get_connector_status_name(connector->status));
3067 
3068 	if (connector->status == connector_status_disconnected)
3069 		return;
3070 
3071 	seq_printf(m, "\tname: %s\n", connector->display_info.name);
3072 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3073 		   connector->display_info.width_mm,
3074 		   connector->display_info.height_mm);
3075 	seq_printf(m, "\tsubpixel order: %s\n",
3076 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3077 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
3078 
3079 	if (!intel_encoder)
3080 		return;
3081 
3082 	switch (connector->connector_type) {
3083 	case DRM_MODE_CONNECTOR_DisplayPort:
3084 	case DRM_MODE_CONNECTOR_eDP:
3085 		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3086 			intel_dp_mst_info(m, intel_connector);
3087 		else
3088 			intel_dp_info(m, intel_connector);
3089 		break;
3090 	case DRM_MODE_CONNECTOR_LVDS:
3091 		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3092 			intel_lvds_info(m, intel_connector);
3093 		break;
3094 	case DRM_MODE_CONNECTOR_HDMIA:
3095 		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3096 		    intel_encoder->type == INTEL_OUTPUT_DDI)
3097 			intel_hdmi_info(m, intel_connector);
3098 		break;
3099 	default:
3100 		break;
3101 	}
3102 
3103 	seq_printf(m, "\tmodes:\n");
3104 	list_for_each_entry(mode, &connector->modes, head)
3105 		intel_seq_print_mode(m, 2, mode);
3106 }
3107 
3108 static const char *plane_type(enum drm_plane_type type)
3109 {
3110 	switch (type) {
3111 	case DRM_PLANE_TYPE_OVERLAY:
3112 		return "OVL";
3113 	case DRM_PLANE_TYPE_PRIMARY:
3114 		return "PRI";
3115 	case DRM_PLANE_TYPE_CURSOR:
3116 		return "CUR";
3117 	/*
3118 	 * Deliberately omitting default: to generate compiler warnings
3119 	 * when a new drm_plane_type gets added.
3120 	 */
3121 	}
3122 
3123 	return "unknown";
3124 }
3125 
3126 static const char *plane_rotation(unsigned int rotation)
3127 {
3128 	static char buf[48];
3129 	/*
3130 	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
3131 	 * will print them all to visualize if the values are misused
3132 	 */
3133 	snprintf(buf, sizeof(buf),
3134 		 "%s%s%s%s%s%s(0x%08x)",
3135 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
3136 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3137 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3138 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3139 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3140 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3141 		 rotation);
3142 
3143 	return buf;
3144 }
3145 
3146 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3147 {
3148 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3149 	struct drm_device *dev = &dev_priv->drm;
3150 	struct intel_plane *intel_plane;
3151 
3152 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3153 		struct drm_plane_state *state;
3154 		struct drm_plane *plane = &intel_plane->base;
3155 		struct drm_format_name_buf format_name;
3156 
3157 		if (!plane->state) {
3158 			seq_puts(m, "plane->state is NULL!\n");
3159 			continue;
3160 		}
3161 
3162 		state = plane->state;
3163 
3164 		if (state->fb) {
3165 			drm_get_format_name(state->fb->format->format,
3166 					    &format_name);
3167 		} else {
3168 			sprintf(format_name.str, "N/A");
3169 		}
3170 
3171 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3172 			   plane->base.id,
3173 			   plane_type(intel_plane->base.type),
3174 			   state->crtc_x, state->crtc_y,
3175 			   state->crtc_w, state->crtc_h,
3176 			   (state->src_x >> 16),
3177 			   ((state->src_x & 0xffff) * 15625) >> 10,
3178 			   (state->src_y >> 16),
3179 			   ((state->src_y & 0xffff) * 15625) >> 10,
3180 			   (state->src_w >> 16),
3181 			   ((state->src_w & 0xffff) * 15625) >> 10,
3182 			   (state->src_h >> 16),
3183 			   ((state->src_h & 0xffff) * 15625) >> 10,
3184 			   format_name.str,
3185 			   plane_rotation(state->rotation));
3186 	}
3187 }
3188 
3189 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3190 {
3191 	struct intel_crtc_state *pipe_config;
3192 	int num_scalers = intel_crtc->num_scalers;
3193 	int i;
3194 
3195 	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3196 
3197 	/* Not all platformas have a scaler */
3198 	if (num_scalers) {
3199 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3200 			   num_scalers,
3201 			   pipe_config->scaler_state.scaler_users,
3202 			   pipe_config->scaler_state.scaler_id);
3203 
3204 		for (i = 0; i < num_scalers; i++) {
3205 			struct intel_scaler *sc =
3206 					&pipe_config->scaler_state.scalers[i];
3207 
3208 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3209 				   i, yesno(sc->in_use), sc->mode);
3210 		}
3211 		seq_puts(m, "\n");
3212 	} else {
3213 		seq_puts(m, "\tNo scalers available on this platform\n");
3214 	}
3215 }
3216 
3217 static int i915_display_info(struct seq_file *m, void *unused)
3218 {
3219 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3220 	struct drm_device *dev = &dev_priv->drm;
3221 	struct intel_crtc *crtc;
3222 	struct drm_connector *connector;
3223 	struct drm_connector_list_iter conn_iter;
3224 
3225 	intel_runtime_pm_get(dev_priv);
3226 	seq_printf(m, "CRTC info\n");
3227 	seq_printf(m, "---------\n");
3228 	for_each_intel_crtc(dev, crtc) {
3229 		struct intel_crtc_state *pipe_config;
3230 
3231 		drm_modeset_lock(&crtc->base.mutex, NULL);
3232 		pipe_config = to_intel_crtc_state(crtc->base.state);
3233 
3234 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3235 			   crtc->base.base.id, pipe_name(crtc->pipe),
3236 			   yesno(pipe_config->base.active),
3237 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3238 			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3239 
3240 		if (pipe_config->base.active) {
3241 			struct intel_plane *cursor =
3242 				to_intel_plane(crtc->base.cursor);
3243 
3244 			intel_crtc_info(m, crtc);
3245 
3246 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3247 				   yesno(cursor->base.state->visible),
3248 				   cursor->base.state->crtc_x,
3249 				   cursor->base.state->crtc_y,
3250 				   cursor->base.state->crtc_w,
3251 				   cursor->base.state->crtc_h,
3252 				   cursor->cursor.base);
3253 			intel_scaler_info(m, crtc);
3254 			intel_plane_info(m, crtc);
3255 		}
3256 
3257 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3258 			   yesno(!crtc->cpu_fifo_underrun_disabled),
3259 			   yesno(!crtc->pch_fifo_underrun_disabled));
3260 		drm_modeset_unlock(&crtc->base.mutex);
3261 	}
3262 
3263 	seq_printf(m, "\n");
3264 	seq_printf(m, "Connector info\n");
3265 	seq_printf(m, "--------------\n");
3266 	mutex_lock(&dev->mode_config.mutex);
3267 	drm_connector_list_iter_begin(dev, &conn_iter);
3268 	drm_for_each_connector_iter(connector, &conn_iter)
3269 		intel_connector_info(m, connector);
3270 	drm_connector_list_iter_end(&conn_iter);
3271 	mutex_unlock(&dev->mode_config.mutex);
3272 
3273 	intel_runtime_pm_put(dev_priv);
3274 
3275 	return 0;
3276 }
3277 
3278 static int i915_engine_info(struct seq_file *m, void *unused)
3279 {
3280 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3281 	struct intel_engine_cs *engine;
3282 	enum intel_engine_id id;
3283 	struct drm_printer p;
3284 
3285 	intel_runtime_pm_get(dev_priv);
3286 
3287 	seq_printf(m, "GT awake? %s (epoch %u)\n",
3288 		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3289 	seq_printf(m, "Global active requests: %d\n",
3290 		   dev_priv->gt.active_requests);
3291 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
3292 		   dev_priv->info.cs_timestamp_frequency_khz);
3293 
3294 	p = drm_seq_file_printer(m);
3295 	for_each_engine(engine, dev_priv, id)
3296 		intel_engine_dump(engine, &p, "%s\n", engine->name);
3297 
3298 	intel_runtime_pm_put(dev_priv);
3299 
3300 	return 0;
3301 }
3302 
3303 static int i915_rcs_topology(struct seq_file *m, void *unused)
3304 {
3305 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3306 	struct drm_printer p = drm_seq_file_printer(m);
3307 
3308 	intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3309 
3310 	return 0;
3311 }
3312 
3313 static int i915_shrinker_info(struct seq_file *m, void *unused)
3314 {
3315 	struct drm_i915_private *i915 = node_to_i915(m->private);
3316 
3317 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3318 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3319 
3320 	return 0;
3321 }
3322 
3323 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3324 {
3325 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3326 	struct drm_device *dev = &dev_priv->drm;
3327 	int i;
3328 
3329 	drm_modeset_lock_all(dev);
3330 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3331 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3332 
3333 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3334 			   pll->info->id);
3335 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3336 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3337 		seq_printf(m, " tracked hardware state:\n");
3338 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3339 		seq_printf(m, " dpll_md: 0x%08x\n",
3340 			   pll->state.hw_state.dpll_md);
3341 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3342 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3343 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3344 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3345 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3346 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3347 			   pll->state.hw_state.mg_refclkin_ctl);
3348 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3349 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
3350 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3351 			   pll->state.hw_state.mg_clktop2_hsclkctl);
3352 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
3353 			   pll->state.hw_state.mg_pll_div0);
3354 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
3355 			   pll->state.hw_state.mg_pll_div1);
3356 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
3357 			   pll->state.hw_state.mg_pll_lf);
3358 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3359 			   pll->state.hw_state.mg_pll_frac_lock);
3360 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3361 			   pll->state.hw_state.mg_pll_ssc);
3362 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
3363 			   pll->state.hw_state.mg_pll_bias);
3364 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3365 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
3366 	}
3367 	drm_modeset_unlock_all(dev);
3368 
3369 	return 0;
3370 }
3371 
3372 static int i915_wa_registers(struct seq_file *m, void *unused)
3373 {
3374 	struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
3375 	int i;
3376 
3377 	seq_printf(m, "Workarounds applied: %d\n", wa->count);
3378 	for (i = 0; i < wa->count; ++i)
3379 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3380 			   wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
3381 
3382 	return 0;
3383 }
3384 
3385 static int i915_ipc_status_show(struct seq_file *m, void *data)
3386 {
3387 	struct drm_i915_private *dev_priv = m->private;
3388 
3389 	seq_printf(m, "Isochronous Priority Control: %s\n",
3390 			yesno(dev_priv->ipc_enabled));
3391 	return 0;
3392 }
3393 
3394 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3395 {
3396 	struct drm_i915_private *dev_priv = inode->i_private;
3397 
3398 	if (!HAS_IPC(dev_priv))
3399 		return -ENODEV;
3400 
3401 	return single_open(file, i915_ipc_status_show, dev_priv);
3402 }
3403 
3404 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3405 				     size_t len, loff_t *offp)
3406 {
3407 	struct seq_file *m = file->private_data;
3408 	struct drm_i915_private *dev_priv = m->private;
3409 	int ret;
3410 	bool enable;
3411 
3412 	ret = kstrtobool_from_user(ubuf, len, &enable);
3413 	if (ret < 0)
3414 		return ret;
3415 
3416 	intel_runtime_pm_get(dev_priv);
3417 	if (!dev_priv->ipc_enabled && enable)
3418 		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3419 	dev_priv->wm.distrust_bios_wm = true;
3420 	dev_priv->ipc_enabled = enable;
3421 	intel_enable_ipc(dev_priv);
3422 	intel_runtime_pm_put(dev_priv);
3423 
3424 	return len;
3425 }
3426 
3427 static const struct file_operations i915_ipc_status_fops = {
3428 	.owner = THIS_MODULE,
3429 	.open = i915_ipc_status_open,
3430 	.read = seq_read,
3431 	.llseek = seq_lseek,
3432 	.release = single_release,
3433 	.write = i915_ipc_status_write
3434 };
3435 
3436 static int i915_ddb_info(struct seq_file *m, void *unused)
3437 {
3438 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3439 	struct drm_device *dev = &dev_priv->drm;
3440 	struct skl_ddb_allocation *ddb;
3441 	struct skl_ddb_entry *entry;
3442 	enum pipe pipe;
3443 	int plane;
3444 
3445 	if (INTEL_GEN(dev_priv) < 9)
3446 		return -ENODEV;
3447 
3448 	drm_modeset_lock_all(dev);
3449 
3450 	ddb = &dev_priv->wm.skl_hw.ddb;
3451 
3452 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3453 
3454 	for_each_pipe(dev_priv, pipe) {
3455 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3456 
3457 		for_each_universal_plane(dev_priv, pipe, plane) {
3458 			entry = &ddb->plane[pipe][plane];
3459 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3460 				   entry->start, entry->end,
3461 				   skl_ddb_entry_size(entry));
3462 		}
3463 
3464 		entry = &ddb->plane[pipe][PLANE_CURSOR];
3465 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3466 			   entry->end, skl_ddb_entry_size(entry));
3467 	}
3468 
3469 	drm_modeset_unlock_all(dev);
3470 
3471 	return 0;
3472 }
3473 
3474 static void drrs_status_per_crtc(struct seq_file *m,
3475 				 struct drm_device *dev,
3476 				 struct intel_crtc *intel_crtc)
3477 {
3478 	struct drm_i915_private *dev_priv = to_i915(dev);
3479 	struct i915_drrs *drrs = &dev_priv->drrs;
3480 	int vrefresh = 0;
3481 	struct drm_connector *connector;
3482 	struct drm_connector_list_iter conn_iter;
3483 
3484 	drm_connector_list_iter_begin(dev, &conn_iter);
3485 	drm_for_each_connector_iter(connector, &conn_iter) {
3486 		if (connector->state->crtc != &intel_crtc->base)
3487 			continue;
3488 
3489 		seq_printf(m, "%s:\n", connector->name);
3490 	}
3491 	drm_connector_list_iter_end(&conn_iter);
3492 
3493 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3494 		seq_puts(m, "\tVBT: DRRS_type: Static");
3495 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3496 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3497 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3498 		seq_puts(m, "\tVBT: DRRS_type: None");
3499 	else
3500 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3501 
3502 	seq_puts(m, "\n\n");
3503 
3504 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3505 		struct intel_panel *panel;
3506 
3507 		mutex_lock(&drrs->mutex);
3508 		/* DRRS Supported */
3509 		seq_puts(m, "\tDRRS Supported: Yes\n");
3510 
3511 		/* disable_drrs() will make drrs->dp NULL */
3512 		if (!drrs->dp) {
3513 			seq_puts(m, "Idleness DRRS: Disabled\n");
3514 			if (dev_priv->psr.enabled)
3515 				seq_puts(m,
3516 				"\tAs PSR is enabled, DRRS is not enabled\n");
3517 			mutex_unlock(&drrs->mutex);
3518 			return;
3519 		}
3520 
3521 		panel = &drrs->dp->attached_connector->panel;
3522 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3523 					drrs->busy_frontbuffer_bits);
3524 
3525 		seq_puts(m, "\n\t\t");
3526 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3527 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3528 			vrefresh = panel->fixed_mode->vrefresh;
3529 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3530 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3531 			vrefresh = panel->downclock_mode->vrefresh;
3532 		} else {
3533 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3534 						drrs->refresh_rate_type);
3535 			mutex_unlock(&drrs->mutex);
3536 			return;
3537 		}
3538 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3539 
3540 		seq_puts(m, "\n\t\t");
3541 		mutex_unlock(&drrs->mutex);
3542 	} else {
3543 		/* DRRS not supported. Print the VBT parameter*/
3544 		seq_puts(m, "\tDRRS Supported : No");
3545 	}
3546 	seq_puts(m, "\n");
3547 }
3548 
3549 static int i915_drrs_status(struct seq_file *m, void *unused)
3550 {
3551 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3552 	struct drm_device *dev = &dev_priv->drm;
3553 	struct intel_crtc *intel_crtc;
3554 	int active_crtc_cnt = 0;
3555 
3556 	drm_modeset_lock_all(dev);
3557 	for_each_intel_crtc(dev, intel_crtc) {
3558 		if (intel_crtc->base.state->active) {
3559 			active_crtc_cnt++;
3560 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3561 
3562 			drrs_status_per_crtc(m, dev, intel_crtc);
3563 		}
3564 	}
3565 	drm_modeset_unlock_all(dev);
3566 
3567 	if (!active_crtc_cnt)
3568 		seq_puts(m, "No active crtc found\n");
3569 
3570 	return 0;
3571 }
3572 
3573 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3574 {
3575 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3576 	struct drm_device *dev = &dev_priv->drm;
3577 	struct intel_encoder *intel_encoder;
3578 	struct intel_digital_port *intel_dig_port;
3579 	struct drm_connector *connector;
3580 	struct drm_connector_list_iter conn_iter;
3581 
3582 	drm_connector_list_iter_begin(dev, &conn_iter);
3583 	drm_for_each_connector_iter(connector, &conn_iter) {
3584 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3585 			continue;
3586 
3587 		intel_encoder = intel_attached_encoder(connector);
3588 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3589 			continue;
3590 
3591 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3592 		if (!intel_dig_port->dp.can_mst)
3593 			continue;
3594 
3595 		seq_printf(m, "MST Source Port %c\n",
3596 			   port_name(intel_dig_port->base.port));
3597 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3598 	}
3599 	drm_connector_list_iter_end(&conn_iter);
3600 
3601 	return 0;
3602 }
3603 
3604 static ssize_t i915_displayport_test_active_write(struct file *file,
3605 						  const char __user *ubuf,
3606 						  size_t len, loff_t *offp)
3607 {
3608 	char *input_buffer;
3609 	int status = 0;
3610 	struct drm_device *dev;
3611 	struct drm_connector *connector;
3612 	struct drm_connector_list_iter conn_iter;
3613 	struct intel_dp *intel_dp;
3614 	int val = 0;
3615 
3616 	dev = ((struct seq_file *)file->private_data)->private;
3617 
3618 	if (len == 0)
3619 		return 0;
3620 
3621 	input_buffer = memdup_user_nul(ubuf, len);
3622 	if (IS_ERR(input_buffer))
3623 		return PTR_ERR(input_buffer);
3624 
3625 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3626 
3627 	drm_connector_list_iter_begin(dev, &conn_iter);
3628 	drm_for_each_connector_iter(connector, &conn_iter) {
3629 		struct intel_encoder *encoder;
3630 
3631 		if (connector->connector_type !=
3632 		    DRM_MODE_CONNECTOR_DisplayPort)
3633 			continue;
3634 
3635 		encoder = to_intel_encoder(connector->encoder);
3636 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3637 			continue;
3638 
3639 		if (encoder && connector->status == connector_status_connected) {
3640 			intel_dp = enc_to_intel_dp(&encoder->base);
3641 			status = kstrtoint(input_buffer, 10, &val);
3642 			if (status < 0)
3643 				break;
3644 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3645 			/* To prevent erroneous activation of the compliance
3646 			 * testing code, only accept an actual value of 1 here
3647 			 */
3648 			if (val == 1)
3649 				intel_dp->compliance.test_active = 1;
3650 			else
3651 				intel_dp->compliance.test_active = 0;
3652 		}
3653 	}
3654 	drm_connector_list_iter_end(&conn_iter);
3655 	kfree(input_buffer);
3656 	if (status < 0)
3657 		return status;
3658 
3659 	*offp += len;
3660 	return len;
3661 }
3662 
3663 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3664 {
3665 	struct drm_i915_private *dev_priv = m->private;
3666 	struct drm_device *dev = &dev_priv->drm;
3667 	struct drm_connector *connector;
3668 	struct drm_connector_list_iter conn_iter;
3669 	struct intel_dp *intel_dp;
3670 
3671 	drm_connector_list_iter_begin(dev, &conn_iter);
3672 	drm_for_each_connector_iter(connector, &conn_iter) {
3673 		struct intel_encoder *encoder;
3674 
3675 		if (connector->connector_type !=
3676 		    DRM_MODE_CONNECTOR_DisplayPort)
3677 			continue;
3678 
3679 		encoder = to_intel_encoder(connector->encoder);
3680 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3681 			continue;
3682 
3683 		if (encoder && connector->status == connector_status_connected) {
3684 			intel_dp = enc_to_intel_dp(&encoder->base);
3685 			if (intel_dp->compliance.test_active)
3686 				seq_puts(m, "1");
3687 			else
3688 				seq_puts(m, "0");
3689 		} else
3690 			seq_puts(m, "0");
3691 	}
3692 	drm_connector_list_iter_end(&conn_iter);
3693 
3694 	return 0;
3695 }
3696 
3697 static int i915_displayport_test_active_open(struct inode *inode,
3698 					     struct file *file)
3699 {
3700 	return single_open(file, i915_displayport_test_active_show,
3701 			   inode->i_private);
3702 }
3703 
3704 static const struct file_operations i915_displayport_test_active_fops = {
3705 	.owner = THIS_MODULE,
3706 	.open = i915_displayport_test_active_open,
3707 	.read = seq_read,
3708 	.llseek = seq_lseek,
3709 	.release = single_release,
3710 	.write = i915_displayport_test_active_write
3711 };
3712 
3713 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3714 {
3715 	struct drm_i915_private *dev_priv = m->private;
3716 	struct drm_device *dev = &dev_priv->drm;
3717 	struct drm_connector *connector;
3718 	struct drm_connector_list_iter conn_iter;
3719 	struct intel_dp *intel_dp;
3720 
3721 	drm_connector_list_iter_begin(dev, &conn_iter);
3722 	drm_for_each_connector_iter(connector, &conn_iter) {
3723 		struct intel_encoder *encoder;
3724 
3725 		if (connector->connector_type !=
3726 		    DRM_MODE_CONNECTOR_DisplayPort)
3727 			continue;
3728 
3729 		encoder = to_intel_encoder(connector->encoder);
3730 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3731 			continue;
3732 
3733 		if (encoder && connector->status == connector_status_connected) {
3734 			intel_dp = enc_to_intel_dp(&encoder->base);
3735 			if (intel_dp->compliance.test_type ==
3736 			    DP_TEST_LINK_EDID_READ)
3737 				seq_printf(m, "%lx",
3738 					   intel_dp->compliance.test_data.edid);
3739 			else if (intel_dp->compliance.test_type ==
3740 				 DP_TEST_LINK_VIDEO_PATTERN) {
3741 				seq_printf(m, "hdisplay: %d\n",
3742 					   intel_dp->compliance.test_data.hdisplay);
3743 				seq_printf(m, "vdisplay: %d\n",
3744 					   intel_dp->compliance.test_data.vdisplay);
3745 				seq_printf(m, "bpc: %u\n",
3746 					   intel_dp->compliance.test_data.bpc);
3747 			}
3748 		} else
3749 			seq_puts(m, "0");
3750 	}
3751 	drm_connector_list_iter_end(&conn_iter);
3752 
3753 	return 0;
3754 }
3755 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3756 
3757 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3758 {
3759 	struct drm_i915_private *dev_priv = m->private;
3760 	struct drm_device *dev = &dev_priv->drm;
3761 	struct drm_connector *connector;
3762 	struct drm_connector_list_iter conn_iter;
3763 	struct intel_dp *intel_dp;
3764 
3765 	drm_connector_list_iter_begin(dev, &conn_iter);
3766 	drm_for_each_connector_iter(connector, &conn_iter) {
3767 		struct intel_encoder *encoder;
3768 
3769 		if (connector->connector_type !=
3770 		    DRM_MODE_CONNECTOR_DisplayPort)
3771 			continue;
3772 
3773 		encoder = to_intel_encoder(connector->encoder);
3774 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3775 			continue;
3776 
3777 		if (encoder && connector->status == connector_status_connected) {
3778 			intel_dp = enc_to_intel_dp(&encoder->base);
3779 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3780 		} else
3781 			seq_puts(m, "0");
3782 	}
3783 	drm_connector_list_iter_end(&conn_iter);
3784 
3785 	return 0;
3786 }
3787 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3788 
3789 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3790 {
3791 	struct drm_i915_private *dev_priv = m->private;
3792 	struct drm_device *dev = &dev_priv->drm;
3793 	int level;
3794 	int num_levels;
3795 
3796 	if (IS_CHERRYVIEW(dev_priv))
3797 		num_levels = 3;
3798 	else if (IS_VALLEYVIEW(dev_priv))
3799 		num_levels = 1;
3800 	else if (IS_G4X(dev_priv))
3801 		num_levels = 3;
3802 	else
3803 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3804 
3805 	drm_modeset_lock_all(dev);
3806 
3807 	for (level = 0; level < num_levels; level++) {
3808 		unsigned int latency = wm[level];
3809 
3810 		/*
3811 		 * - WM1+ latency values in 0.5us units
3812 		 * - latencies are in us on gen9/vlv/chv
3813 		 */
3814 		if (INTEL_GEN(dev_priv) >= 9 ||
3815 		    IS_VALLEYVIEW(dev_priv) ||
3816 		    IS_CHERRYVIEW(dev_priv) ||
3817 		    IS_G4X(dev_priv))
3818 			latency *= 10;
3819 		else if (level > 0)
3820 			latency *= 5;
3821 
3822 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3823 			   level, wm[level], latency / 10, latency % 10);
3824 	}
3825 
3826 	drm_modeset_unlock_all(dev);
3827 }
3828 
3829 static int pri_wm_latency_show(struct seq_file *m, void *data)
3830 {
3831 	struct drm_i915_private *dev_priv = m->private;
3832 	const uint16_t *latencies;
3833 
3834 	if (INTEL_GEN(dev_priv) >= 9)
3835 		latencies = dev_priv->wm.skl_latency;
3836 	else
3837 		latencies = dev_priv->wm.pri_latency;
3838 
3839 	wm_latency_show(m, latencies);
3840 
3841 	return 0;
3842 }
3843 
3844 static int spr_wm_latency_show(struct seq_file *m, void *data)
3845 {
3846 	struct drm_i915_private *dev_priv = m->private;
3847 	const uint16_t *latencies;
3848 
3849 	if (INTEL_GEN(dev_priv) >= 9)
3850 		latencies = dev_priv->wm.skl_latency;
3851 	else
3852 		latencies = dev_priv->wm.spr_latency;
3853 
3854 	wm_latency_show(m, latencies);
3855 
3856 	return 0;
3857 }
3858 
3859 static int cur_wm_latency_show(struct seq_file *m, void *data)
3860 {
3861 	struct drm_i915_private *dev_priv = m->private;
3862 	const uint16_t *latencies;
3863 
3864 	if (INTEL_GEN(dev_priv) >= 9)
3865 		latencies = dev_priv->wm.skl_latency;
3866 	else
3867 		latencies = dev_priv->wm.cur_latency;
3868 
3869 	wm_latency_show(m, latencies);
3870 
3871 	return 0;
3872 }
3873 
3874 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3875 {
3876 	struct drm_i915_private *dev_priv = inode->i_private;
3877 
3878 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3879 		return -ENODEV;
3880 
3881 	return single_open(file, pri_wm_latency_show, dev_priv);
3882 }
3883 
3884 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3885 {
3886 	struct drm_i915_private *dev_priv = inode->i_private;
3887 
3888 	if (HAS_GMCH_DISPLAY(dev_priv))
3889 		return -ENODEV;
3890 
3891 	return single_open(file, spr_wm_latency_show, dev_priv);
3892 }
3893 
3894 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3895 {
3896 	struct drm_i915_private *dev_priv = inode->i_private;
3897 
3898 	if (HAS_GMCH_DISPLAY(dev_priv))
3899 		return -ENODEV;
3900 
3901 	return single_open(file, cur_wm_latency_show, dev_priv);
3902 }
3903 
3904 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3905 				size_t len, loff_t *offp, uint16_t wm[8])
3906 {
3907 	struct seq_file *m = file->private_data;
3908 	struct drm_i915_private *dev_priv = m->private;
3909 	struct drm_device *dev = &dev_priv->drm;
3910 	uint16_t new[8] = { 0 };
3911 	int num_levels;
3912 	int level;
3913 	int ret;
3914 	char tmp[32];
3915 
3916 	if (IS_CHERRYVIEW(dev_priv))
3917 		num_levels = 3;
3918 	else if (IS_VALLEYVIEW(dev_priv))
3919 		num_levels = 1;
3920 	else if (IS_G4X(dev_priv))
3921 		num_levels = 3;
3922 	else
3923 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3924 
3925 	if (len >= sizeof(tmp))
3926 		return -EINVAL;
3927 
3928 	if (copy_from_user(tmp, ubuf, len))
3929 		return -EFAULT;
3930 
3931 	tmp[len] = '\0';
3932 
3933 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3934 		     &new[0], &new[1], &new[2], &new[3],
3935 		     &new[4], &new[5], &new[6], &new[7]);
3936 	if (ret != num_levels)
3937 		return -EINVAL;
3938 
3939 	drm_modeset_lock_all(dev);
3940 
3941 	for (level = 0; level < num_levels; level++)
3942 		wm[level] = new[level];
3943 
3944 	drm_modeset_unlock_all(dev);
3945 
3946 	return len;
3947 }
3948 
3949 
3950 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3951 				    size_t len, loff_t *offp)
3952 {
3953 	struct seq_file *m = file->private_data;
3954 	struct drm_i915_private *dev_priv = m->private;
3955 	uint16_t *latencies;
3956 
3957 	if (INTEL_GEN(dev_priv) >= 9)
3958 		latencies = dev_priv->wm.skl_latency;
3959 	else
3960 		latencies = dev_priv->wm.pri_latency;
3961 
3962 	return wm_latency_write(file, ubuf, len, offp, latencies);
3963 }
3964 
3965 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3966 				    size_t len, loff_t *offp)
3967 {
3968 	struct seq_file *m = file->private_data;
3969 	struct drm_i915_private *dev_priv = m->private;
3970 	uint16_t *latencies;
3971 
3972 	if (INTEL_GEN(dev_priv) >= 9)
3973 		latencies = dev_priv->wm.skl_latency;
3974 	else
3975 		latencies = dev_priv->wm.spr_latency;
3976 
3977 	return wm_latency_write(file, ubuf, len, offp, latencies);
3978 }
3979 
3980 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3981 				    size_t len, loff_t *offp)
3982 {
3983 	struct seq_file *m = file->private_data;
3984 	struct drm_i915_private *dev_priv = m->private;
3985 	uint16_t *latencies;
3986 
3987 	if (INTEL_GEN(dev_priv) >= 9)
3988 		latencies = dev_priv->wm.skl_latency;
3989 	else
3990 		latencies = dev_priv->wm.cur_latency;
3991 
3992 	return wm_latency_write(file, ubuf, len, offp, latencies);
3993 }
3994 
3995 static const struct file_operations i915_pri_wm_latency_fops = {
3996 	.owner = THIS_MODULE,
3997 	.open = pri_wm_latency_open,
3998 	.read = seq_read,
3999 	.llseek = seq_lseek,
4000 	.release = single_release,
4001 	.write = pri_wm_latency_write
4002 };
4003 
4004 static const struct file_operations i915_spr_wm_latency_fops = {
4005 	.owner = THIS_MODULE,
4006 	.open = spr_wm_latency_open,
4007 	.read = seq_read,
4008 	.llseek = seq_lseek,
4009 	.release = single_release,
4010 	.write = spr_wm_latency_write
4011 };
4012 
4013 static const struct file_operations i915_cur_wm_latency_fops = {
4014 	.owner = THIS_MODULE,
4015 	.open = cur_wm_latency_open,
4016 	.read = seq_read,
4017 	.llseek = seq_lseek,
4018 	.release = single_release,
4019 	.write = cur_wm_latency_write
4020 };
4021 
4022 static int
4023 i915_wedged_get(void *data, u64 *val)
4024 {
4025 	struct drm_i915_private *dev_priv = data;
4026 
4027 	*val = i915_terminally_wedged(&dev_priv->gpu_error);
4028 
4029 	return 0;
4030 }
4031 
4032 static int
4033 i915_wedged_set(void *data, u64 val)
4034 {
4035 	struct drm_i915_private *i915 = data;
4036 	struct intel_engine_cs *engine;
4037 	unsigned int tmp;
4038 
4039 	/*
4040 	 * There is no safeguard against this debugfs entry colliding
4041 	 * with the hangcheck calling same i915_handle_error() in
4042 	 * parallel, causing an explosion. For now we assume that the
4043 	 * test harness is responsible enough not to inject gpu hangs
4044 	 * while it is writing to 'i915_wedged'
4045 	 */
4046 
4047 	if (i915_reset_backoff(&i915->gpu_error))
4048 		return -EAGAIN;
4049 
4050 	for_each_engine_masked(engine, i915, val, tmp) {
4051 		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4052 		engine->hangcheck.stalled = true;
4053 	}
4054 
4055 	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
4056 			  "Manually set wedged engine mask = %llx", val);
4057 
4058 	wait_on_bit(&i915->gpu_error.flags,
4059 		    I915_RESET_HANDOFF,
4060 		    TASK_UNINTERRUPTIBLE);
4061 
4062 	return 0;
4063 }
4064 
4065 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4066 			i915_wedged_get, i915_wedged_set,
4067 			"%llu\n");
4068 
4069 static int
4070 fault_irq_set(struct drm_i915_private *i915,
4071 	      unsigned long *irq,
4072 	      unsigned long val)
4073 {
4074 	int err;
4075 
4076 	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4077 	if (err)
4078 		return err;
4079 
4080 	err = i915_gem_wait_for_idle(i915,
4081 				     I915_WAIT_LOCKED |
4082 				     I915_WAIT_INTERRUPTIBLE,
4083 				     MAX_SCHEDULE_TIMEOUT);
4084 	if (err)
4085 		goto err_unlock;
4086 
4087 	*irq = val;
4088 	mutex_unlock(&i915->drm.struct_mutex);
4089 
4090 	/* Flush idle worker to disarm irq */
4091 	drain_delayed_work(&i915->gt.idle_work);
4092 
4093 	return 0;
4094 
4095 err_unlock:
4096 	mutex_unlock(&i915->drm.struct_mutex);
4097 	return err;
4098 }
4099 
4100 static int
4101 i915_ring_missed_irq_get(void *data, u64 *val)
4102 {
4103 	struct drm_i915_private *dev_priv = data;
4104 
4105 	*val = dev_priv->gpu_error.missed_irq_rings;
4106 	return 0;
4107 }
4108 
4109 static int
4110 i915_ring_missed_irq_set(void *data, u64 val)
4111 {
4112 	struct drm_i915_private *i915 = data;
4113 
4114 	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
4115 }
4116 
4117 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4118 			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4119 			"0x%08llx\n");
4120 
4121 static int
4122 i915_ring_test_irq_get(void *data, u64 *val)
4123 {
4124 	struct drm_i915_private *dev_priv = data;
4125 
4126 	*val = dev_priv->gpu_error.test_irq_rings;
4127 
4128 	return 0;
4129 }
4130 
4131 static int
4132 i915_ring_test_irq_set(void *data, u64 val)
4133 {
4134 	struct drm_i915_private *i915 = data;
4135 
4136 	/* GuC keeps the user interrupt permanently enabled for submission */
4137 	if (USES_GUC_SUBMISSION(i915))
4138 		return -ENODEV;
4139 
4140 	/*
4141 	 * From icl, we can no longer individually mask interrupt generation
4142 	 * from each engine.
4143 	 */
4144 	if (INTEL_GEN(i915) >= 11)
4145 		return -ENODEV;
4146 
4147 	val &= INTEL_INFO(i915)->ring_mask;
4148 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4149 
4150 	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
4151 }
4152 
4153 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4154 			i915_ring_test_irq_get, i915_ring_test_irq_set,
4155 			"0x%08llx\n");
4156 
4157 #define DROP_UNBOUND	BIT(0)
4158 #define DROP_BOUND	BIT(1)
4159 #define DROP_RETIRE	BIT(2)
4160 #define DROP_ACTIVE	BIT(3)
4161 #define DROP_FREED	BIT(4)
4162 #define DROP_SHRINK_ALL	BIT(5)
4163 #define DROP_IDLE	BIT(6)
4164 #define DROP_RESET_ACTIVE	BIT(7)
4165 #define DROP_RESET_SEQNO	BIT(8)
4166 #define DROP_ALL (DROP_UNBOUND	| \
4167 		  DROP_BOUND	| \
4168 		  DROP_RETIRE	| \
4169 		  DROP_ACTIVE	| \
4170 		  DROP_FREED	| \
4171 		  DROP_SHRINK_ALL |\
4172 		  DROP_IDLE	| \
4173 		  DROP_RESET_ACTIVE | \
4174 		  DROP_RESET_SEQNO)
4175 static int
4176 i915_drop_caches_get(void *data, u64 *val)
4177 {
4178 	*val = DROP_ALL;
4179 
4180 	return 0;
4181 }
4182 
4183 static int
4184 i915_drop_caches_set(void *data, u64 val)
4185 {
4186 	struct drm_i915_private *i915 = data;
4187 	int ret = 0;
4188 
4189 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4190 		  val, val & DROP_ALL);
4191 	intel_runtime_pm_get(i915);
4192 
4193 	if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
4194 		i915_gem_set_wedged(i915);
4195 
4196 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
4197 	 * on ioctls on -EAGAIN. */
4198 	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
4199 		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
4200 		if (ret)
4201 			goto out;
4202 
4203 		if (val & DROP_ACTIVE)
4204 			ret = i915_gem_wait_for_idle(i915,
4205 						     I915_WAIT_INTERRUPTIBLE |
4206 						     I915_WAIT_LOCKED,
4207 						     MAX_SCHEDULE_TIMEOUT);
4208 
4209 		if (ret == 0 && val & DROP_RESET_SEQNO)
4210 			ret = i915_gem_set_global_seqno(&i915->drm, 1);
4211 
4212 		if (val & DROP_RETIRE)
4213 			i915_retire_requests(i915);
4214 
4215 		mutex_unlock(&i915->drm.struct_mutex);
4216 	}
4217 
4218 	if (val & DROP_RESET_ACTIVE &&
4219 	    i915_terminally_wedged(&i915->gpu_error)) {
4220 		i915_handle_error(i915, ALL_ENGINES, 0, NULL);
4221 		wait_on_bit(&i915->gpu_error.flags,
4222 			    I915_RESET_HANDOFF,
4223 			    TASK_UNINTERRUPTIBLE);
4224 	}
4225 
4226 	fs_reclaim_acquire(GFP_KERNEL);
4227 	if (val & DROP_BOUND)
4228 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
4229 
4230 	if (val & DROP_UNBOUND)
4231 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
4232 
4233 	if (val & DROP_SHRINK_ALL)
4234 		i915_gem_shrink_all(i915);
4235 	fs_reclaim_release(GFP_KERNEL);
4236 
4237 	if (val & DROP_IDLE) {
4238 		do {
4239 			if (READ_ONCE(i915->gt.active_requests))
4240 				flush_delayed_work(&i915->gt.retire_work);
4241 			drain_delayed_work(&i915->gt.idle_work);
4242 		} while (READ_ONCE(i915->gt.awake));
4243 	}
4244 
4245 	if (val & DROP_FREED)
4246 		i915_gem_drain_freed_objects(i915);
4247 
4248 out:
4249 	intel_runtime_pm_put(i915);
4250 
4251 	return ret;
4252 }
4253 
4254 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4255 			i915_drop_caches_get, i915_drop_caches_set,
4256 			"0x%08llx\n");
4257 
4258 static int
4259 i915_cache_sharing_get(void *data, u64 *val)
4260 {
4261 	struct drm_i915_private *dev_priv = data;
4262 	u32 snpcr;
4263 
4264 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4265 		return -ENODEV;
4266 
4267 	intel_runtime_pm_get(dev_priv);
4268 
4269 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4270 
4271 	intel_runtime_pm_put(dev_priv);
4272 
4273 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4274 
4275 	return 0;
4276 }
4277 
4278 static int
4279 i915_cache_sharing_set(void *data, u64 val)
4280 {
4281 	struct drm_i915_private *dev_priv = data;
4282 	u32 snpcr;
4283 
4284 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4285 		return -ENODEV;
4286 
4287 	if (val > 3)
4288 		return -EINVAL;
4289 
4290 	intel_runtime_pm_get(dev_priv);
4291 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4292 
4293 	/* Update the cache sharing policy here as well */
4294 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4295 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
4296 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4297 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4298 
4299 	intel_runtime_pm_put(dev_priv);
4300 	return 0;
4301 }
4302 
4303 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4304 			i915_cache_sharing_get, i915_cache_sharing_set,
4305 			"%llu\n");
4306 
4307 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4308 					  struct sseu_dev_info *sseu)
4309 {
4310 #define SS_MAX 2
4311 	const int ss_max = SS_MAX;
4312 	u32 sig1[SS_MAX], sig2[SS_MAX];
4313 	int ss;
4314 
4315 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4316 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4317 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4318 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4319 
4320 	for (ss = 0; ss < ss_max; ss++) {
4321 		unsigned int eu_cnt;
4322 
4323 		if (sig1[ss] & CHV_SS_PG_ENABLE)
4324 			/* skip disabled subslice */
4325 			continue;
4326 
4327 		sseu->slice_mask = BIT(0);
4328 		sseu->subslice_mask[0] |= BIT(ss);
4329 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4330 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4331 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4332 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4333 		sseu->eu_total += eu_cnt;
4334 		sseu->eu_per_subslice = max_t(unsigned int,
4335 					      sseu->eu_per_subslice, eu_cnt);
4336 	}
4337 #undef SS_MAX
4338 }
4339 
4340 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4341 				     struct sseu_dev_info *sseu)
4342 {
4343 #define SS_MAX 6
4344 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4345 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4346 	int s, ss;
4347 
4348 	for (s = 0; s < info->sseu.max_slices; s++) {
4349 		/*
4350 		 * FIXME: Valid SS Mask respects the spec and read
4351 		 * only valid bits for those registers, excluding reserverd
4352 		 * although this seems wrong because it would leave many
4353 		 * subslices without ACK.
4354 		 */
4355 		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4356 			GEN10_PGCTL_VALID_SS_MASK(s);
4357 		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4358 		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4359 	}
4360 
4361 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4362 		     GEN9_PGCTL_SSA_EU19_ACK |
4363 		     GEN9_PGCTL_SSA_EU210_ACK |
4364 		     GEN9_PGCTL_SSA_EU311_ACK;
4365 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4366 		     GEN9_PGCTL_SSB_EU19_ACK |
4367 		     GEN9_PGCTL_SSB_EU210_ACK |
4368 		     GEN9_PGCTL_SSB_EU311_ACK;
4369 
4370 	for (s = 0; s < info->sseu.max_slices; s++) {
4371 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4372 			/* skip disabled slice */
4373 			continue;
4374 
4375 		sseu->slice_mask |= BIT(s);
4376 		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
4377 
4378 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4379 			unsigned int eu_cnt;
4380 
4381 			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4382 				/* skip disabled subslice */
4383 				continue;
4384 
4385 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4386 					       eu_mask[ss % 2]);
4387 			sseu->eu_total += eu_cnt;
4388 			sseu->eu_per_subslice = max_t(unsigned int,
4389 						      sseu->eu_per_subslice,
4390 						      eu_cnt);
4391 		}
4392 	}
4393 #undef SS_MAX
4394 }
4395 
4396 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4397 				    struct sseu_dev_info *sseu)
4398 {
4399 #define SS_MAX 3
4400 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4401 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4402 	int s, ss;
4403 
4404 	for (s = 0; s < info->sseu.max_slices; s++) {
4405 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4406 		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4407 		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4408 	}
4409 
4410 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4411 		     GEN9_PGCTL_SSA_EU19_ACK |
4412 		     GEN9_PGCTL_SSA_EU210_ACK |
4413 		     GEN9_PGCTL_SSA_EU311_ACK;
4414 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4415 		     GEN9_PGCTL_SSB_EU19_ACK |
4416 		     GEN9_PGCTL_SSB_EU210_ACK |
4417 		     GEN9_PGCTL_SSB_EU311_ACK;
4418 
4419 	for (s = 0; s < info->sseu.max_slices; s++) {
4420 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4421 			/* skip disabled slice */
4422 			continue;
4423 
4424 		sseu->slice_mask |= BIT(s);
4425 
4426 		if (IS_GEN9_BC(dev_priv))
4427 			sseu->subslice_mask[s] =
4428 				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4429 
4430 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4431 			unsigned int eu_cnt;
4432 
4433 			if (IS_GEN9_LP(dev_priv)) {
4434 				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4435 					/* skip disabled subslice */
4436 					continue;
4437 
4438 				sseu->subslice_mask[s] |= BIT(ss);
4439 			}
4440 
4441 			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4442 					       eu_mask[ss%2]);
4443 			sseu->eu_total += eu_cnt;
4444 			sseu->eu_per_subslice = max_t(unsigned int,
4445 						      sseu->eu_per_subslice,
4446 						      eu_cnt);
4447 		}
4448 	}
4449 #undef SS_MAX
4450 }
4451 
4452 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4453 					 struct sseu_dev_info *sseu)
4454 {
4455 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4456 	int s;
4457 
4458 	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4459 
4460 	if (sseu->slice_mask) {
4461 		sseu->eu_per_subslice =
4462 				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
4463 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4464 			sseu->subslice_mask[s] =
4465 				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4466 		}
4467 		sseu->eu_total = sseu->eu_per_subslice *
4468 				 sseu_subslice_total(sseu);
4469 
4470 		/* subtract fused off EU(s) from enabled slice(s) */
4471 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4472 			u8 subslice_7eu =
4473 				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
4474 
4475 			sseu->eu_total -= hweight8(subslice_7eu);
4476 		}
4477 	}
4478 }
4479 
4480 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4481 				 const struct sseu_dev_info *sseu)
4482 {
4483 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4484 	const char *type = is_available_info ? "Available" : "Enabled";
4485 	int s;
4486 
4487 	seq_printf(m, "  %s Slice Mask: %04x\n", type,
4488 		   sseu->slice_mask);
4489 	seq_printf(m, "  %s Slice Total: %u\n", type,
4490 		   hweight8(sseu->slice_mask));
4491 	seq_printf(m, "  %s Subslice Total: %u\n", type,
4492 		   sseu_subslice_total(sseu));
4493 	for (s = 0; s < fls(sseu->slice_mask); s++) {
4494 		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4495 			   s, hweight8(sseu->subslice_mask[s]));
4496 	}
4497 	seq_printf(m, "  %s EU Total: %u\n", type,
4498 		   sseu->eu_total);
4499 	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4500 		   sseu->eu_per_subslice);
4501 
4502 	if (!is_available_info)
4503 		return;
4504 
4505 	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4506 	if (HAS_POOLED_EU(dev_priv))
4507 		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4508 
4509 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4510 		   yesno(sseu->has_slice_pg));
4511 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4512 		   yesno(sseu->has_subslice_pg));
4513 	seq_printf(m, "  Has EU Power Gating: %s\n",
4514 		   yesno(sseu->has_eu_pg));
4515 }
4516 
4517 static int i915_sseu_status(struct seq_file *m, void *unused)
4518 {
4519 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4520 	struct sseu_dev_info sseu;
4521 
4522 	if (INTEL_GEN(dev_priv) < 8)
4523 		return -ENODEV;
4524 
4525 	seq_puts(m, "SSEU Device Info\n");
4526 	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
4527 
4528 	seq_puts(m, "SSEU Device Status\n");
4529 	memset(&sseu, 0, sizeof(sseu));
4530 	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4531 	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4532 	sseu.max_eus_per_subslice =
4533 		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
4534 
4535 	intel_runtime_pm_get(dev_priv);
4536 
4537 	if (IS_CHERRYVIEW(dev_priv)) {
4538 		cherryview_sseu_device_status(dev_priv, &sseu);
4539 	} else if (IS_BROADWELL(dev_priv)) {
4540 		broadwell_sseu_device_status(dev_priv, &sseu);
4541 	} else if (IS_GEN9(dev_priv)) {
4542 		gen9_sseu_device_status(dev_priv, &sseu);
4543 	} else if (INTEL_GEN(dev_priv) >= 10) {
4544 		gen10_sseu_device_status(dev_priv, &sseu);
4545 	}
4546 
4547 	intel_runtime_pm_put(dev_priv);
4548 
4549 	i915_print_sseu_info(m, false, &sseu);
4550 
4551 	return 0;
4552 }
4553 
4554 static int i915_forcewake_open(struct inode *inode, struct file *file)
4555 {
4556 	struct drm_i915_private *i915 = inode->i_private;
4557 
4558 	if (INTEL_GEN(i915) < 6)
4559 		return 0;
4560 
4561 	intel_runtime_pm_get(i915);
4562 	intel_uncore_forcewake_user_get(i915);
4563 
4564 	return 0;
4565 }
4566 
4567 static int i915_forcewake_release(struct inode *inode, struct file *file)
4568 {
4569 	struct drm_i915_private *i915 = inode->i_private;
4570 
4571 	if (INTEL_GEN(i915) < 6)
4572 		return 0;
4573 
4574 	intel_uncore_forcewake_user_put(i915);
4575 	intel_runtime_pm_put(i915);
4576 
4577 	return 0;
4578 }
4579 
4580 static const struct file_operations i915_forcewake_fops = {
4581 	.owner = THIS_MODULE,
4582 	.open = i915_forcewake_open,
4583 	.release = i915_forcewake_release,
4584 };
4585 
4586 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4587 {
4588 	struct drm_i915_private *dev_priv = m->private;
4589 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4590 
4591 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4592 	seq_printf(m, "Detected: %s\n",
4593 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4594 
4595 	return 0;
4596 }
4597 
4598 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4599 					const char __user *ubuf, size_t len,
4600 					loff_t *offp)
4601 {
4602 	struct seq_file *m = file->private_data;
4603 	struct drm_i915_private *dev_priv = m->private;
4604 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4605 	unsigned int new_threshold;
4606 	int i;
4607 	char *newline;
4608 	char tmp[16];
4609 
4610 	if (len >= sizeof(tmp))
4611 		return -EINVAL;
4612 
4613 	if (copy_from_user(tmp, ubuf, len))
4614 		return -EFAULT;
4615 
4616 	tmp[len] = '\0';
4617 
4618 	/* Strip newline, if any */
4619 	newline = strchr(tmp, '\n');
4620 	if (newline)
4621 		*newline = '\0';
4622 
4623 	if (strcmp(tmp, "reset") == 0)
4624 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4625 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4626 		return -EINVAL;
4627 
4628 	if (new_threshold > 0)
4629 		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4630 			      new_threshold);
4631 	else
4632 		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4633 
4634 	spin_lock_irq(&dev_priv->irq_lock);
4635 	hotplug->hpd_storm_threshold = new_threshold;
4636 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4637 	for_each_hpd_pin(i)
4638 		hotplug->stats[i].count = 0;
4639 	spin_unlock_irq(&dev_priv->irq_lock);
4640 
4641 	/* Re-enable hpd immediately if we were in an irq storm */
4642 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4643 
4644 	return len;
4645 }
4646 
4647 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4648 {
4649 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4650 }
4651 
4652 static const struct file_operations i915_hpd_storm_ctl_fops = {
4653 	.owner = THIS_MODULE,
4654 	.open = i915_hpd_storm_ctl_open,
4655 	.read = seq_read,
4656 	.llseek = seq_lseek,
4657 	.release = single_release,
4658 	.write = i915_hpd_storm_ctl_write
4659 };
4660 
4661 static int i915_drrs_ctl_set(void *data, u64 val)
4662 {
4663 	struct drm_i915_private *dev_priv = data;
4664 	struct drm_device *dev = &dev_priv->drm;
4665 	struct intel_crtc *crtc;
4666 
4667 	if (INTEL_GEN(dev_priv) < 7)
4668 		return -ENODEV;
4669 
4670 	for_each_intel_crtc(dev, crtc) {
4671 		struct drm_connector_list_iter conn_iter;
4672 		struct intel_crtc_state *crtc_state;
4673 		struct drm_connector *connector;
4674 		struct drm_crtc_commit *commit;
4675 		int ret;
4676 
4677 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4678 		if (ret)
4679 			return ret;
4680 
4681 		crtc_state = to_intel_crtc_state(crtc->base.state);
4682 
4683 		if (!crtc_state->base.active ||
4684 		    !crtc_state->has_drrs)
4685 			goto out;
4686 
4687 		commit = crtc_state->base.commit;
4688 		if (commit) {
4689 			ret = wait_for_completion_interruptible(&commit->hw_done);
4690 			if (ret)
4691 				goto out;
4692 		}
4693 
4694 		drm_connector_list_iter_begin(dev, &conn_iter);
4695 		drm_for_each_connector_iter(connector, &conn_iter) {
4696 			struct intel_encoder *encoder;
4697 			struct intel_dp *intel_dp;
4698 
4699 			if (!(crtc_state->base.connector_mask &
4700 			      drm_connector_mask(connector)))
4701 				continue;
4702 
4703 			encoder = intel_attached_encoder(connector);
4704 			if (encoder->type != INTEL_OUTPUT_EDP)
4705 				continue;
4706 
4707 			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4708 						val ? "en" : "dis", val);
4709 
4710 			intel_dp = enc_to_intel_dp(&encoder->base);
4711 			if (val)
4712 				intel_edp_drrs_enable(intel_dp,
4713 						      crtc_state);
4714 			else
4715 				intel_edp_drrs_disable(intel_dp,
4716 						       crtc_state);
4717 		}
4718 		drm_connector_list_iter_end(&conn_iter);
4719 
4720 out:
4721 		drm_modeset_unlock(&crtc->base.mutex);
4722 		if (ret)
4723 			return ret;
4724 	}
4725 
4726 	return 0;
4727 }
4728 
4729 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4730 
4731 static ssize_t
4732 i915_fifo_underrun_reset_write(struct file *filp,
4733 			       const char __user *ubuf,
4734 			       size_t cnt, loff_t *ppos)
4735 {
4736 	struct drm_i915_private *dev_priv = filp->private_data;
4737 	struct intel_crtc *intel_crtc;
4738 	struct drm_device *dev = &dev_priv->drm;
4739 	int ret;
4740 	bool reset;
4741 
4742 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
4743 	if (ret)
4744 		return ret;
4745 
4746 	if (!reset)
4747 		return cnt;
4748 
4749 	for_each_intel_crtc(dev, intel_crtc) {
4750 		struct drm_crtc_commit *commit;
4751 		struct intel_crtc_state *crtc_state;
4752 
4753 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4754 		if (ret)
4755 			return ret;
4756 
4757 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4758 		commit = crtc_state->base.commit;
4759 		if (commit) {
4760 			ret = wait_for_completion_interruptible(&commit->hw_done);
4761 			if (!ret)
4762 				ret = wait_for_completion_interruptible(&commit->flip_done);
4763 		}
4764 
4765 		if (!ret && crtc_state->base.active) {
4766 			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4767 				      pipe_name(intel_crtc->pipe));
4768 
4769 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4770 		}
4771 
4772 		drm_modeset_unlock(&intel_crtc->base.mutex);
4773 
4774 		if (ret)
4775 			return ret;
4776 	}
4777 
4778 	ret = intel_fbc_reset_underrun(dev_priv);
4779 	if (ret)
4780 		return ret;
4781 
4782 	return cnt;
4783 }
4784 
4785 static const struct file_operations i915_fifo_underrun_reset_ops = {
4786 	.owner = THIS_MODULE,
4787 	.open = simple_open,
4788 	.write = i915_fifo_underrun_reset_write,
4789 	.llseek = default_llseek,
4790 };
4791 
4792 static const struct drm_info_list i915_debugfs_list[] = {
4793 	{"i915_capabilities", i915_capabilities, 0},
4794 	{"i915_gem_objects", i915_gem_object_info, 0},
4795 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
4796 	{"i915_gem_stolen", i915_gem_stolen_list_info },
4797 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4798 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4799 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4800 	{"i915_guc_info", i915_guc_info, 0},
4801 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4802 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4803 	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4804 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4805 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4806 	{"i915_frequency_info", i915_frequency_info, 0},
4807 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4808 	{"i915_reset_info", i915_reset_info, 0},
4809 	{"i915_drpc_info", i915_drpc_info, 0},
4810 	{"i915_emon_status", i915_emon_status, 0},
4811 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4812 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4813 	{"i915_fbc_status", i915_fbc_status, 0},
4814 	{"i915_ips_status", i915_ips_status, 0},
4815 	{"i915_sr_status", i915_sr_status, 0},
4816 	{"i915_opregion", i915_opregion, 0},
4817 	{"i915_vbt", i915_vbt, 0},
4818 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4819 	{"i915_context_status", i915_context_status, 0},
4820 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4821 	{"i915_swizzle_info", i915_swizzle_info, 0},
4822 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
4823 	{"i915_llc", i915_llc, 0},
4824 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4825 	{"i915_energy_uJ", i915_energy_uJ, 0},
4826 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4827 	{"i915_power_domain_info", i915_power_domain_info, 0},
4828 	{"i915_dmc_info", i915_dmc_info, 0},
4829 	{"i915_display_info", i915_display_info, 0},
4830 	{"i915_engine_info", i915_engine_info, 0},
4831 	{"i915_rcs_topology", i915_rcs_topology, 0},
4832 	{"i915_shrinker_info", i915_shrinker_info, 0},
4833 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4834 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4835 	{"i915_wa_registers", i915_wa_registers, 0},
4836 	{"i915_ddb_info", i915_ddb_info, 0},
4837 	{"i915_sseu_status", i915_sseu_status, 0},
4838 	{"i915_drrs_status", i915_drrs_status, 0},
4839 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4840 };
4841 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4842 
4843 static const struct i915_debugfs_files {
4844 	const char *name;
4845 	const struct file_operations *fops;
4846 } i915_debugfs_files[] = {
4847 	{"i915_wedged", &i915_wedged_fops},
4848 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4849 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4850 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
4851 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4852 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4853 	{"i915_error_state", &i915_error_state_fops},
4854 	{"i915_gpu_info", &i915_gpu_info_fops},
4855 #endif
4856 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4857 	{"i915_next_seqno", &i915_next_seqno_fops},
4858 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4859 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4860 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4861 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4862 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4863 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4864 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4865 	{"i915_guc_log_level", &i915_guc_log_level_fops},
4866 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
4867 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4868 	{"i915_ipc_status", &i915_ipc_status_fops},
4869 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
4870 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4871 };
4872 
4873 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4874 {
4875 	struct drm_minor *minor = dev_priv->drm.primary;
4876 	struct dentry *ent;
4877 	int i;
4878 
4879 	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4880 				  minor->debugfs_root, to_i915(minor->dev),
4881 				  &i915_forcewake_fops);
4882 	if (!ent)
4883 		return -ENOMEM;
4884 
4885 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4886 		ent = debugfs_create_file(i915_debugfs_files[i].name,
4887 					  S_IRUGO | S_IWUSR,
4888 					  minor->debugfs_root,
4889 					  to_i915(minor->dev),
4890 					  i915_debugfs_files[i].fops);
4891 		if (!ent)
4892 			return -ENOMEM;
4893 	}
4894 
4895 	return drm_debugfs_create_files(i915_debugfs_list,
4896 					I915_DEBUGFS_ENTRIES,
4897 					minor->debugfs_root, minor);
4898 }
4899 
4900 struct dpcd_block {
4901 	/* DPCD dump start address. */
4902 	unsigned int offset;
4903 	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4904 	unsigned int end;
4905 	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4906 	size_t size;
4907 	/* Only valid for eDP. */
4908 	bool edp;
4909 };
4910 
4911 static const struct dpcd_block i915_dpcd_debug[] = {
4912 	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4913 	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4914 	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4915 	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4916 	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4917 	{ .offset = DP_SET_POWER },
4918 	{ .offset = DP_EDP_DPCD_REV },
4919 	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4920 	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4921 	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4922 };
4923 
4924 static int i915_dpcd_show(struct seq_file *m, void *data)
4925 {
4926 	struct drm_connector *connector = m->private;
4927 	struct intel_dp *intel_dp =
4928 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4929 	uint8_t buf[16];
4930 	ssize_t err;
4931 	int i;
4932 
4933 	if (connector->status != connector_status_connected)
4934 		return -ENODEV;
4935 
4936 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4937 		const struct dpcd_block *b = &i915_dpcd_debug[i];
4938 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4939 
4940 		if (b->edp &&
4941 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4942 			continue;
4943 
4944 		/* low tech for now */
4945 		if (WARN_ON(size > sizeof(buf)))
4946 			continue;
4947 
4948 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4949 		if (err < 0)
4950 			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4951 		else
4952 			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4953 	}
4954 
4955 	return 0;
4956 }
4957 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4958 
4959 static int i915_panel_show(struct seq_file *m, void *data)
4960 {
4961 	struct drm_connector *connector = m->private;
4962 	struct intel_dp *intel_dp =
4963 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4964 
4965 	if (connector->status != connector_status_connected)
4966 		return -ENODEV;
4967 
4968 	seq_printf(m, "Panel power up delay: %d\n",
4969 		   intel_dp->panel_power_up_delay);
4970 	seq_printf(m, "Panel power down delay: %d\n",
4971 		   intel_dp->panel_power_down_delay);
4972 	seq_printf(m, "Backlight on delay: %d\n",
4973 		   intel_dp->backlight_on_delay);
4974 	seq_printf(m, "Backlight off delay: %d\n",
4975 		   intel_dp->backlight_off_delay);
4976 
4977 	return 0;
4978 }
4979 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4980 
4981 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4982 {
4983 	struct drm_connector *connector = m->private;
4984 	struct intel_connector *intel_connector = to_intel_connector(connector);
4985 
4986 	if (connector->status != connector_status_connected)
4987 		return -ENODEV;
4988 
4989 	/* HDCP is supported by connector */
4990 	if (!intel_connector->hdcp.shim)
4991 		return -EINVAL;
4992 
4993 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
4994 		   connector->base.id);
4995 	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4996 		   "None" : "HDCP1.4");
4997 	seq_puts(m, "\n");
4998 
4999 	return 0;
5000 }
5001 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
5002 
5003 /**
5004  * i915_debugfs_connector_add - add i915 specific connector debugfs files
5005  * @connector: pointer to a registered drm_connector
5006  *
5007  * Cleanup will be done by drm_connector_unregister() through a call to
5008  * drm_debugfs_connector_remove().
5009  *
5010  * Returns 0 on success, negative error codes on error.
5011  */
5012 int i915_debugfs_connector_add(struct drm_connector *connector)
5013 {
5014 	struct dentry *root = connector->debugfs_entry;
5015 
5016 	/* The connector must have been registered beforehands. */
5017 	if (!root)
5018 		return -ENODEV;
5019 
5020 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5021 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5022 		debugfs_create_file("i915_dpcd", S_IRUGO, root,
5023 				    connector, &i915_dpcd_fops);
5024 
5025 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
5026 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
5027 				    connector, &i915_panel_fops);
5028 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
5029 				    connector, &i915_psr_sink_status_fops);
5030 	}
5031 
5032 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5033 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5034 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
5035 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
5036 				    connector, &i915_hdcp_sink_capability_fops);
5037 	}
5038 
5039 	return 0;
5040 }
5041