1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28 
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34 
35 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36 {
37 	return to_i915(node->minor->dev);
38 }
39 
40 static int i915_capabilities(struct seq_file *m, void *data)
41 {
42 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
44 	struct drm_printer p = drm_seq_file_printer(m);
45 
46 	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
47 	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
48 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
49 
50 	intel_device_info_dump_flags(info, &p);
51 	intel_device_info_dump_runtime(info, &p);
52 	intel_driver_caps_print(&dev_priv->caps, &p);
53 
54 	kernel_param_lock(THIS_MODULE);
55 	i915_params_dump(&i915_modparams, &p);
56 	kernel_param_unlock(THIS_MODULE);
57 
58 	return 0;
59 }
60 
61 static char get_active_flag(struct drm_i915_gem_object *obj)
62 {
63 	return i915_gem_object_is_active(obj) ? '*' : ' ';
64 }
65 
66 static char get_pin_flag(struct drm_i915_gem_object *obj)
67 {
68 	return obj->pin_global ? 'p' : ' ';
69 }
70 
71 static char get_tiling_flag(struct drm_i915_gem_object *obj)
72 {
73 	switch (i915_gem_object_get_tiling(obj)) {
74 	default:
75 	case I915_TILING_NONE: return ' ';
76 	case I915_TILING_X: return 'X';
77 	case I915_TILING_Y: return 'Y';
78 	}
79 }
80 
81 static char get_global_flag(struct drm_i915_gem_object *obj)
82 {
83 	return obj->userfault_count ? 'g' : ' ';
84 }
85 
86 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
87 {
88 	return obj->mm.mapping ? 'M' : ' ';
89 }
90 
91 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92 {
93 	u64 size = 0;
94 	struct i915_vma *vma;
95 
96 	for_each_ggtt_vma(vma, obj) {
97 		if (drm_mm_node_allocated(&vma->node))
98 			size += vma->node.size;
99 	}
100 
101 	return size;
102 }
103 
104 static const char *
105 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106 {
107 	size_t x = 0;
108 
109 	switch (page_sizes) {
110 	case 0:
111 		return "";
112 	case I915_GTT_PAGE_SIZE_4K:
113 		return "4K";
114 	case I915_GTT_PAGE_SIZE_64K:
115 		return "64K";
116 	case I915_GTT_PAGE_SIZE_2M:
117 		return "2M";
118 	default:
119 		if (!buf)
120 			return "M";
121 
122 		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 			x += snprintf(buf + x, len - x, "2M, ");
124 		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 			x += snprintf(buf + x, len - x, "64K, ");
126 		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 			x += snprintf(buf + x, len - x, "4K, ");
128 		buf[x-2] = '\0';
129 
130 		return buf;
131 	}
132 }
133 
134 static void
135 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
136 {
137 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
138 	struct intel_engine_cs *engine;
139 	struct i915_vma *vma;
140 	unsigned int frontbuffer_bits;
141 	int pin_count = 0;
142 
143 	lockdep_assert_held(&obj->base.dev->struct_mutex);
144 
145 	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
146 		   &obj->base,
147 		   get_active_flag(obj),
148 		   get_pin_flag(obj),
149 		   get_tiling_flag(obj),
150 		   get_global_flag(obj),
151 		   get_pin_mapped_flag(obj),
152 		   obj->base.size / 1024,
153 		   obj->read_domains,
154 		   obj->write_domain,
155 		   i915_cache_level_str(dev_priv, obj->cache_level),
156 		   obj->mm.dirty ? " dirty" : "",
157 		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
158 	if (obj->base.name)
159 		seq_printf(m, " (name: %d)", obj->base.name);
160 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
161 		if (i915_vma_is_pinned(vma))
162 			pin_count++;
163 	}
164 	seq_printf(m, " (pinned x %d)", pin_count);
165 	if (obj->pin_global)
166 		seq_printf(m, " (global)");
167 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
168 		if (!drm_mm_node_allocated(&vma->node))
169 			continue;
170 
171 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
172 			   i915_vma_is_ggtt(vma) ? "g" : "pp",
173 			   vma->node.start, vma->node.size,
174 			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
175 		if (i915_vma_is_ggtt(vma)) {
176 			switch (vma->ggtt_view.type) {
177 			case I915_GGTT_VIEW_NORMAL:
178 				seq_puts(m, ", normal");
179 				break;
180 
181 			case I915_GGTT_VIEW_PARTIAL:
182 				seq_printf(m, ", partial [%08llx+%x]",
183 					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
184 					   vma->ggtt_view.partial.size << PAGE_SHIFT);
185 				break;
186 
187 			case I915_GGTT_VIEW_ROTATED:
188 				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
189 					   vma->ggtt_view.rotated.plane[0].width,
190 					   vma->ggtt_view.rotated.plane[0].height,
191 					   vma->ggtt_view.rotated.plane[0].stride,
192 					   vma->ggtt_view.rotated.plane[0].offset,
193 					   vma->ggtt_view.rotated.plane[1].width,
194 					   vma->ggtt_view.rotated.plane[1].height,
195 					   vma->ggtt_view.rotated.plane[1].stride,
196 					   vma->ggtt_view.rotated.plane[1].offset);
197 				break;
198 
199 			default:
200 				MISSING_CASE(vma->ggtt_view.type);
201 				break;
202 			}
203 		}
204 		if (vma->fence)
205 			seq_printf(m, " , fence: %d%s",
206 				   vma->fence->id,
207 				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
208 		seq_puts(m, ")");
209 	}
210 	if (obj->stolen)
211 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
212 
213 	engine = i915_gem_object_last_write_engine(obj);
214 	if (engine)
215 		seq_printf(m, " (%s)", engine->name);
216 
217 	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
218 	if (frontbuffer_bits)
219 		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
220 }
221 
222 static int obj_rank_by_stolen(const void *A, const void *B)
223 {
224 	const struct drm_i915_gem_object *a =
225 		*(const struct drm_i915_gem_object **)A;
226 	const struct drm_i915_gem_object *b =
227 		*(const struct drm_i915_gem_object **)B;
228 
229 	if (a->stolen->start < b->stolen->start)
230 		return -1;
231 	if (a->stolen->start > b->stolen->start)
232 		return 1;
233 	return 0;
234 }
235 
236 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
237 {
238 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
239 	struct drm_device *dev = &dev_priv->drm;
240 	struct drm_i915_gem_object **objects;
241 	struct drm_i915_gem_object *obj;
242 	u64 total_obj_size, total_gtt_size;
243 	unsigned long total, count, n;
244 	int ret;
245 
246 	total = READ_ONCE(dev_priv->mm.object_count);
247 	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
248 	if (!objects)
249 		return -ENOMEM;
250 
251 	ret = mutex_lock_interruptible(&dev->struct_mutex);
252 	if (ret)
253 		goto out;
254 
255 	total_obj_size = total_gtt_size = count = 0;
256 
257 	spin_lock(&dev_priv->mm.obj_lock);
258 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
259 		if (count == total)
260 			break;
261 
262 		if (obj->stolen == NULL)
263 			continue;
264 
265 		objects[count++] = obj;
266 		total_obj_size += obj->base.size;
267 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
268 
269 	}
270 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
271 		if (count == total)
272 			break;
273 
274 		if (obj->stolen == NULL)
275 			continue;
276 
277 		objects[count++] = obj;
278 		total_obj_size += obj->base.size;
279 	}
280 	spin_unlock(&dev_priv->mm.obj_lock);
281 
282 	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
283 
284 	seq_puts(m, "Stolen:\n");
285 	for (n = 0; n < count; n++) {
286 		seq_puts(m, "   ");
287 		describe_obj(m, objects[n]);
288 		seq_putc(m, '\n');
289 	}
290 	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
291 		   count, total_obj_size, total_gtt_size);
292 
293 	mutex_unlock(&dev->struct_mutex);
294 out:
295 	kvfree(objects);
296 	return ret;
297 }
298 
299 struct file_stats {
300 	struct drm_i915_file_private *file_priv;
301 	unsigned long count;
302 	u64 total, unbound;
303 	u64 global, shared;
304 	u64 active, inactive;
305 };
306 
307 static int per_file_stats(int id, void *ptr, void *data)
308 {
309 	struct drm_i915_gem_object *obj = ptr;
310 	struct file_stats *stats = data;
311 	struct i915_vma *vma;
312 
313 	lockdep_assert_held(&obj->base.dev->struct_mutex);
314 
315 	stats->count++;
316 	stats->total += obj->base.size;
317 	if (!obj->bind_count)
318 		stats->unbound += obj->base.size;
319 	if (obj->base.name || obj->base.dma_buf)
320 		stats->shared += obj->base.size;
321 
322 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
323 		if (!drm_mm_node_allocated(&vma->node))
324 			continue;
325 
326 		if (i915_vma_is_ggtt(vma)) {
327 			stats->global += vma->node.size;
328 		} else {
329 			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
330 
331 			if (ppgtt->vm.file != stats->file_priv)
332 				continue;
333 		}
334 
335 		if (i915_vma_is_active(vma))
336 			stats->active += vma->node.size;
337 		else
338 			stats->inactive += vma->node.size;
339 	}
340 
341 	return 0;
342 }
343 
344 #define print_file_stats(m, name, stats) do { \
345 	if (stats.count) \
346 		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
347 			   name, \
348 			   stats.count, \
349 			   stats.total, \
350 			   stats.active, \
351 			   stats.inactive, \
352 			   stats.global, \
353 			   stats.shared, \
354 			   stats.unbound); \
355 } while (0)
356 
357 static void print_batch_pool_stats(struct seq_file *m,
358 				   struct drm_i915_private *dev_priv)
359 {
360 	struct drm_i915_gem_object *obj;
361 	struct file_stats stats;
362 	struct intel_engine_cs *engine;
363 	enum intel_engine_id id;
364 	int j;
365 
366 	memset(&stats, 0, sizeof(stats));
367 
368 	for_each_engine(engine, dev_priv, id) {
369 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
370 			list_for_each_entry(obj,
371 					    &engine->batch_pool.cache_list[j],
372 					    batch_pool_link)
373 				per_file_stats(0, obj, &stats);
374 		}
375 	}
376 
377 	print_file_stats(m, "[k]batch pool", stats);
378 }
379 
380 static int per_file_ctx_stats(int idx, void *ptr, void *data)
381 {
382 	struct i915_gem_context *ctx = ptr;
383 	struct intel_engine_cs *engine;
384 	enum intel_engine_id id;
385 
386 	for_each_engine(engine, ctx->i915, id) {
387 		struct intel_context *ce = to_intel_context(ctx, engine);
388 
389 		if (ce->state)
390 			per_file_stats(0, ce->state->obj, data);
391 		if (ce->ring)
392 			per_file_stats(0, ce->ring->vma->obj, data);
393 	}
394 
395 	return 0;
396 }
397 
398 static void print_context_stats(struct seq_file *m,
399 				struct drm_i915_private *dev_priv)
400 {
401 	struct drm_device *dev = &dev_priv->drm;
402 	struct file_stats stats;
403 	struct drm_file *file;
404 
405 	memset(&stats, 0, sizeof(stats));
406 
407 	mutex_lock(&dev->struct_mutex);
408 	if (dev_priv->kernel_context)
409 		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
410 
411 	list_for_each_entry(file, &dev->filelist, lhead) {
412 		struct drm_i915_file_private *fpriv = file->driver_priv;
413 		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
414 	}
415 	mutex_unlock(&dev->struct_mutex);
416 
417 	print_file_stats(m, "[k]contexts", stats);
418 }
419 
420 static int i915_gem_object_info(struct seq_file *m, void *data)
421 {
422 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
423 	struct drm_device *dev = &dev_priv->drm;
424 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
425 	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
426 	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
427 	struct drm_i915_gem_object *obj;
428 	unsigned int page_sizes = 0;
429 	struct drm_file *file;
430 	char buf[80];
431 	int ret;
432 
433 	ret = mutex_lock_interruptible(&dev->struct_mutex);
434 	if (ret)
435 		return ret;
436 
437 	seq_printf(m, "%u objects, %llu bytes\n",
438 		   dev_priv->mm.object_count,
439 		   dev_priv->mm.object_memory);
440 
441 	size = count = 0;
442 	mapped_size = mapped_count = 0;
443 	purgeable_size = purgeable_count = 0;
444 	huge_size = huge_count = 0;
445 
446 	spin_lock(&dev_priv->mm.obj_lock);
447 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
448 		size += obj->base.size;
449 		++count;
450 
451 		if (obj->mm.madv == I915_MADV_DONTNEED) {
452 			purgeable_size += obj->base.size;
453 			++purgeable_count;
454 		}
455 
456 		if (obj->mm.mapping) {
457 			mapped_count++;
458 			mapped_size += obj->base.size;
459 		}
460 
461 		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
462 			huge_count++;
463 			huge_size += obj->base.size;
464 			page_sizes |= obj->mm.page_sizes.sg;
465 		}
466 	}
467 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
468 
469 	size = count = dpy_size = dpy_count = 0;
470 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
471 		size += obj->base.size;
472 		++count;
473 
474 		if (obj->pin_global) {
475 			dpy_size += obj->base.size;
476 			++dpy_count;
477 		}
478 
479 		if (obj->mm.madv == I915_MADV_DONTNEED) {
480 			purgeable_size += obj->base.size;
481 			++purgeable_count;
482 		}
483 
484 		if (obj->mm.mapping) {
485 			mapped_count++;
486 			mapped_size += obj->base.size;
487 		}
488 
489 		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
490 			huge_count++;
491 			huge_size += obj->base.size;
492 			page_sizes |= obj->mm.page_sizes.sg;
493 		}
494 	}
495 	spin_unlock(&dev_priv->mm.obj_lock);
496 
497 	seq_printf(m, "%u bound objects, %llu bytes\n",
498 		   count, size);
499 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
500 		   purgeable_count, purgeable_size);
501 	seq_printf(m, "%u mapped objects, %llu bytes\n",
502 		   mapped_count, mapped_size);
503 	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
504 		   huge_count,
505 		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
506 		   huge_size);
507 	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
508 		   dpy_count, dpy_size);
509 
510 	seq_printf(m, "%llu [%pa] gtt total\n",
511 		   ggtt->vm.total, &ggtt->mappable_end);
512 	seq_printf(m, "Supported page sizes: %s\n",
513 		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
514 					buf, sizeof(buf)));
515 
516 	seq_putc(m, '\n');
517 	print_batch_pool_stats(m, dev_priv);
518 	mutex_unlock(&dev->struct_mutex);
519 
520 	mutex_lock(&dev->filelist_mutex);
521 	print_context_stats(m, dev_priv);
522 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
523 		struct file_stats stats;
524 		struct drm_i915_file_private *file_priv = file->driver_priv;
525 		struct i915_request *request;
526 		struct task_struct *task;
527 
528 		mutex_lock(&dev->struct_mutex);
529 
530 		memset(&stats, 0, sizeof(stats));
531 		stats.file_priv = file->driver_priv;
532 		spin_lock(&file->table_lock);
533 		idr_for_each(&file->object_idr, per_file_stats, &stats);
534 		spin_unlock(&file->table_lock);
535 		/*
536 		 * Although we have a valid reference on file->pid, that does
537 		 * not guarantee that the task_struct who called get_pid() is
538 		 * still alive (e.g. get_pid(current) => fork() => exit()).
539 		 * Therefore, we need to protect this ->comm access using RCU.
540 		 */
541 		request = list_first_entry_or_null(&file_priv->mm.request_list,
542 						   struct i915_request,
543 						   client_link);
544 		rcu_read_lock();
545 		task = pid_task(request && request->gem_context->pid ?
546 				request->gem_context->pid : file->pid,
547 				PIDTYPE_PID);
548 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
549 		rcu_read_unlock();
550 
551 		mutex_unlock(&dev->struct_mutex);
552 	}
553 	mutex_unlock(&dev->filelist_mutex);
554 
555 	return 0;
556 }
557 
558 static int i915_gem_gtt_info(struct seq_file *m, void *data)
559 {
560 	struct drm_info_node *node = m->private;
561 	struct drm_i915_private *dev_priv = node_to_i915(node);
562 	struct drm_device *dev = &dev_priv->drm;
563 	struct drm_i915_gem_object **objects;
564 	struct drm_i915_gem_object *obj;
565 	u64 total_obj_size, total_gtt_size;
566 	unsigned long nobject, n;
567 	int count, ret;
568 
569 	nobject = READ_ONCE(dev_priv->mm.object_count);
570 	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571 	if (!objects)
572 		return -ENOMEM;
573 
574 	ret = mutex_lock_interruptible(&dev->struct_mutex);
575 	if (ret)
576 		return ret;
577 
578 	count = 0;
579 	spin_lock(&dev_priv->mm.obj_lock);
580 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581 		objects[count++] = obj;
582 		if (count == nobject)
583 			break;
584 	}
585 	spin_unlock(&dev_priv->mm.obj_lock);
586 
587 	total_obj_size = total_gtt_size = 0;
588 	for (n = 0;  n < count; n++) {
589 		obj = objects[n];
590 
591 		seq_puts(m, "   ");
592 		describe_obj(m, obj);
593 		seq_putc(m, '\n');
594 		total_obj_size += obj->base.size;
595 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
596 	}
597 
598 	mutex_unlock(&dev->struct_mutex);
599 
600 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
601 		   count, total_obj_size, total_gtt_size);
602 	kvfree(objects);
603 
604 	return 0;
605 }
606 
607 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
608 {
609 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
610 	struct drm_device *dev = &dev_priv->drm;
611 	struct drm_i915_gem_object *obj;
612 	struct intel_engine_cs *engine;
613 	enum intel_engine_id id;
614 	int total = 0;
615 	int ret, j;
616 
617 	ret = mutex_lock_interruptible(&dev->struct_mutex);
618 	if (ret)
619 		return ret;
620 
621 	for_each_engine(engine, dev_priv, id) {
622 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
623 			int count;
624 
625 			count = 0;
626 			list_for_each_entry(obj,
627 					    &engine->batch_pool.cache_list[j],
628 					    batch_pool_link)
629 				count++;
630 			seq_printf(m, "%s cache[%d]: %d objects\n",
631 				   engine->name, j, count);
632 
633 			list_for_each_entry(obj,
634 					    &engine->batch_pool.cache_list[j],
635 					    batch_pool_link) {
636 				seq_puts(m, "   ");
637 				describe_obj(m, obj);
638 				seq_putc(m, '\n');
639 			}
640 
641 			total += count;
642 		}
643 	}
644 
645 	seq_printf(m, "total: %d\n", total);
646 
647 	mutex_unlock(&dev->struct_mutex);
648 
649 	return 0;
650 }
651 
652 static void gen8_display_interrupt_info(struct seq_file *m)
653 {
654 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
655 	int pipe;
656 
657 	for_each_pipe(dev_priv, pipe) {
658 		enum intel_display_power_domain power_domain;
659 
660 		power_domain = POWER_DOMAIN_PIPE(pipe);
661 		if (!intel_display_power_get_if_enabled(dev_priv,
662 							power_domain)) {
663 			seq_printf(m, "Pipe %c power disabled\n",
664 				   pipe_name(pipe));
665 			continue;
666 		}
667 		seq_printf(m, "Pipe %c IMR:\t%08x\n",
668 			   pipe_name(pipe),
669 			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
670 		seq_printf(m, "Pipe %c IIR:\t%08x\n",
671 			   pipe_name(pipe),
672 			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
673 		seq_printf(m, "Pipe %c IER:\t%08x\n",
674 			   pipe_name(pipe),
675 			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
676 
677 		intel_display_power_put(dev_priv, power_domain);
678 	}
679 
680 	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
681 		   I915_READ(GEN8_DE_PORT_IMR));
682 	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
683 		   I915_READ(GEN8_DE_PORT_IIR));
684 	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
685 		   I915_READ(GEN8_DE_PORT_IER));
686 
687 	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
688 		   I915_READ(GEN8_DE_MISC_IMR));
689 	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
690 		   I915_READ(GEN8_DE_MISC_IIR));
691 	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
692 		   I915_READ(GEN8_DE_MISC_IER));
693 
694 	seq_printf(m, "PCU interrupt mask:\t%08x\n",
695 		   I915_READ(GEN8_PCU_IMR));
696 	seq_printf(m, "PCU interrupt identity:\t%08x\n",
697 		   I915_READ(GEN8_PCU_IIR));
698 	seq_printf(m, "PCU interrupt enable:\t%08x\n",
699 		   I915_READ(GEN8_PCU_IER));
700 }
701 
702 static int i915_interrupt_info(struct seq_file *m, void *data)
703 {
704 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
705 	struct intel_engine_cs *engine;
706 	enum intel_engine_id id;
707 	int i, pipe;
708 
709 	intel_runtime_pm_get(dev_priv);
710 
711 	if (IS_CHERRYVIEW(dev_priv)) {
712 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
713 			   I915_READ(GEN8_MASTER_IRQ));
714 
715 		seq_printf(m, "Display IER:\t%08x\n",
716 			   I915_READ(VLV_IER));
717 		seq_printf(m, "Display IIR:\t%08x\n",
718 			   I915_READ(VLV_IIR));
719 		seq_printf(m, "Display IIR_RW:\t%08x\n",
720 			   I915_READ(VLV_IIR_RW));
721 		seq_printf(m, "Display IMR:\t%08x\n",
722 			   I915_READ(VLV_IMR));
723 		for_each_pipe(dev_priv, pipe) {
724 			enum intel_display_power_domain power_domain;
725 
726 			power_domain = POWER_DOMAIN_PIPE(pipe);
727 			if (!intel_display_power_get_if_enabled(dev_priv,
728 								power_domain)) {
729 				seq_printf(m, "Pipe %c power disabled\n",
730 					   pipe_name(pipe));
731 				continue;
732 			}
733 
734 			seq_printf(m, "Pipe %c stat:\t%08x\n",
735 				   pipe_name(pipe),
736 				   I915_READ(PIPESTAT(pipe)));
737 
738 			intel_display_power_put(dev_priv, power_domain);
739 		}
740 
741 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
742 		seq_printf(m, "Port hotplug:\t%08x\n",
743 			   I915_READ(PORT_HOTPLUG_EN));
744 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745 			   I915_READ(VLV_DPFLIPSTAT));
746 		seq_printf(m, "DPINVGTT:\t%08x\n",
747 			   I915_READ(DPINVGTT));
748 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
749 
750 		for (i = 0; i < 4; i++) {
751 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752 				   i, I915_READ(GEN8_GT_IMR(i)));
753 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754 				   i, I915_READ(GEN8_GT_IIR(i)));
755 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756 				   i, I915_READ(GEN8_GT_IER(i)));
757 		}
758 
759 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
760 			   I915_READ(GEN8_PCU_IMR));
761 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
762 			   I915_READ(GEN8_PCU_IIR));
763 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
764 			   I915_READ(GEN8_PCU_IER));
765 	} else if (INTEL_GEN(dev_priv) >= 11) {
766 		seq_printf(m, "Master Interrupt Control:  %08x\n",
767 			   I915_READ(GEN11_GFX_MSTR_IRQ));
768 
769 		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
770 			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771 		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
772 			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773 		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
774 			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775 		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777 		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
778 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779 		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
780 			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781 
782 		seq_printf(m, "Display Interrupt Control:\t%08x\n",
783 			   I915_READ(GEN11_DISPLAY_INT_CTL));
784 
785 		gen8_display_interrupt_info(m);
786 	} else if (INTEL_GEN(dev_priv) >= 8) {
787 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
788 			   I915_READ(GEN8_MASTER_IRQ));
789 
790 		for (i = 0; i < 4; i++) {
791 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792 				   i, I915_READ(GEN8_GT_IMR(i)));
793 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794 				   i, I915_READ(GEN8_GT_IIR(i)));
795 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796 				   i, I915_READ(GEN8_GT_IER(i)));
797 		}
798 
799 		gen8_display_interrupt_info(m);
800 	} else if (IS_VALLEYVIEW(dev_priv)) {
801 		seq_printf(m, "Display IER:\t%08x\n",
802 			   I915_READ(VLV_IER));
803 		seq_printf(m, "Display IIR:\t%08x\n",
804 			   I915_READ(VLV_IIR));
805 		seq_printf(m, "Display IIR_RW:\t%08x\n",
806 			   I915_READ(VLV_IIR_RW));
807 		seq_printf(m, "Display IMR:\t%08x\n",
808 			   I915_READ(VLV_IMR));
809 		for_each_pipe(dev_priv, pipe) {
810 			enum intel_display_power_domain power_domain;
811 
812 			power_domain = POWER_DOMAIN_PIPE(pipe);
813 			if (!intel_display_power_get_if_enabled(dev_priv,
814 								power_domain)) {
815 				seq_printf(m, "Pipe %c power disabled\n",
816 					   pipe_name(pipe));
817 				continue;
818 			}
819 
820 			seq_printf(m, "Pipe %c stat:\t%08x\n",
821 				   pipe_name(pipe),
822 				   I915_READ(PIPESTAT(pipe)));
823 			intel_display_power_put(dev_priv, power_domain);
824 		}
825 
826 		seq_printf(m, "Master IER:\t%08x\n",
827 			   I915_READ(VLV_MASTER_IER));
828 
829 		seq_printf(m, "Render IER:\t%08x\n",
830 			   I915_READ(GTIER));
831 		seq_printf(m, "Render IIR:\t%08x\n",
832 			   I915_READ(GTIIR));
833 		seq_printf(m, "Render IMR:\t%08x\n",
834 			   I915_READ(GTIMR));
835 
836 		seq_printf(m, "PM IER:\t\t%08x\n",
837 			   I915_READ(GEN6_PMIER));
838 		seq_printf(m, "PM IIR:\t\t%08x\n",
839 			   I915_READ(GEN6_PMIIR));
840 		seq_printf(m, "PM IMR:\t\t%08x\n",
841 			   I915_READ(GEN6_PMIMR));
842 
843 		seq_printf(m, "Port hotplug:\t%08x\n",
844 			   I915_READ(PORT_HOTPLUG_EN));
845 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846 			   I915_READ(VLV_DPFLIPSTAT));
847 		seq_printf(m, "DPINVGTT:\t%08x\n",
848 			   I915_READ(DPINVGTT));
849 
850 	} else if (!HAS_PCH_SPLIT(dev_priv)) {
851 		seq_printf(m, "Interrupt enable:    %08x\n",
852 			   I915_READ(IER));
853 		seq_printf(m, "Interrupt identity:  %08x\n",
854 			   I915_READ(IIR));
855 		seq_printf(m, "Interrupt mask:      %08x\n",
856 			   I915_READ(IMR));
857 		for_each_pipe(dev_priv, pipe)
858 			seq_printf(m, "Pipe %c stat:         %08x\n",
859 				   pipe_name(pipe),
860 				   I915_READ(PIPESTAT(pipe)));
861 	} else {
862 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
863 			   I915_READ(DEIER));
864 		seq_printf(m, "North Display Interrupt identity:	%08x\n",
865 			   I915_READ(DEIIR));
866 		seq_printf(m, "North Display Interrupt mask:		%08x\n",
867 			   I915_READ(DEIMR));
868 		seq_printf(m, "South Display Interrupt enable:		%08x\n",
869 			   I915_READ(SDEIER));
870 		seq_printf(m, "South Display Interrupt identity:	%08x\n",
871 			   I915_READ(SDEIIR));
872 		seq_printf(m, "South Display Interrupt mask:		%08x\n",
873 			   I915_READ(SDEIMR));
874 		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
875 			   I915_READ(GTIER));
876 		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
877 			   I915_READ(GTIIR));
878 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
879 			   I915_READ(GTIMR));
880 	}
881 
882 	if (INTEL_GEN(dev_priv) >= 11) {
883 		seq_printf(m, "RCS Intr Mask:\t %08x\n",
884 			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885 		seq_printf(m, "BCS Intr Mask:\t %08x\n",
886 			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887 		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888 			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889 		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890 			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891 		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892 			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893 		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894 			   I915_READ(GEN11_GUC_SG_INTR_MASK));
895 		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897 		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899 		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900 			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901 
902 	} else if (INTEL_GEN(dev_priv) >= 6) {
903 		for_each_engine(engine, dev_priv, id) {
904 			seq_printf(m,
905 				   "Graphics Interrupt mask (%s):	%08x\n",
906 				   engine->name, I915_READ_IMR(engine));
907 		}
908 	}
909 
910 	intel_runtime_pm_put(dev_priv);
911 
912 	return 0;
913 }
914 
915 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916 {
917 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
918 	struct drm_device *dev = &dev_priv->drm;
919 	int i, ret;
920 
921 	ret = mutex_lock_interruptible(&dev->struct_mutex);
922 	if (ret)
923 		return ret;
924 
925 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
927 		struct i915_vma *vma = dev_priv->fence_regs[i].vma;
928 
929 		seq_printf(m, "Fence %d, pin count = %d, object = ",
930 			   i, dev_priv->fence_regs[i].pin_count);
931 		if (!vma)
932 			seq_puts(m, "unused");
933 		else
934 			describe_obj(m, vma->obj);
935 		seq_putc(m, '\n');
936 	}
937 
938 	mutex_unlock(&dev->struct_mutex);
939 	return 0;
940 }
941 
942 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
943 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944 			      size_t count, loff_t *pos)
945 {
946 	struct i915_gpu_state *error;
947 	ssize_t ret;
948 	void *buf;
949 
950 	error = file->private_data;
951 	if (!error)
952 		return 0;
953 
954 	/* Bounce buffer required because of kernfs __user API convenience. */
955 	buf = kmalloc(count, GFP_KERNEL);
956 	if (!buf)
957 		return -ENOMEM;
958 
959 	ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
960 	if (ret <= 0)
961 		goto out;
962 
963 	if (!copy_to_user(ubuf, buf, ret))
964 		*pos += ret;
965 	else
966 		ret = -EFAULT;
967 
968 out:
969 	kfree(buf);
970 	return ret;
971 }
972 
973 static int gpu_state_release(struct inode *inode, struct file *file)
974 {
975 	i915_gpu_state_put(file->private_data);
976 	return 0;
977 }
978 
979 static int i915_gpu_info_open(struct inode *inode, struct file *file)
980 {
981 	struct drm_i915_private *i915 = inode->i_private;
982 	struct i915_gpu_state *gpu;
983 
984 	intel_runtime_pm_get(i915);
985 	gpu = i915_capture_gpu_state(i915);
986 	intel_runtime_pm_put(i915);
987 	if (IS_ERR(gpu))
988 		return PTR_ERR(gpu);
989 
990 	file->private_data = gpu;
991 	return 0;
992 }
993 
994 static const struct file_operations i915_gpu_info_fops = {
995 	.owner = THIS_MODULE,
996 	.open = i915_gpu_info_open,
997 	.read = gpu_state_read,
998 	.llseek = default_llseek,
999 	.release = gpu_state_release,
1000 };
1001 
1002 static ssize_t
1003 i915_error_state_write(struct file *filp,
1004 		       const char __user *ubuf,
1005 		       size_t cnt,
1006 		       loff_t *ppos)
1007 {
1008 	struct i915_gpu_state *error = filp->private_data;
1009 
1010 	if (!error)
1011 		return 0;
1012 
1013 	DRM_DEBUG_DRIVER("Resetting error state\n");
1014 	i915_reset_error_state(error->i915);
1015 
1016 	return cnt;
1017 }
1018 
1019 static int i915_error_state_open(struct inode *inode, struct file *file)
1020 {
1021 	struct i915_gpu_state *error;
1022 
1023 	error = i915_first_error_state(inode->i_private);
1024 	if (IS_ERR(error))
1025 		return PTR_ERR(error);
1026 
1027 	file->private_data  = error;
1028 	return 0;
1029 }
1030 
1031 static const struct file_operations i915_error_state_fops = {
1032 	.owner = THIS_MODULE,
1033 	.open = i915_error_state_open,
1034 	.read = gpu_state_read,
1035 	.write = i915_error_state_write,
1036 	.llseek = default_llseek,
1037 	.release = gpu_state_release,
1038 };
1039 #endif
1040 
1041 static int
1042 i915_next_seqno_set(void *data, u64 val)
1043 {
1044 	struct drm_i915_private *dev_priv = data;
1045 	struct drm_device *dev = &dev_priv->drm;
1046 	int ret;
1047 
1048 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1049 	if (ret)
1050 		return ret;
1051 
1052 	intel_runtime_pm_get(dev_priv);
1053 	ret = i915_gem_set_global_seqno(dev, val);
1054 	intel_runtime_pm_put(dev_priv);
1055 
1056 	mutex_unlock(&dev->struct_mutex);
1057 
1058 	return ret;
1059 }
1060 
1061 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1062 			NULL, i915_next_seqno_set,
1063 			"0x%llx\n");
1064 
1065 static int i915_frequency_info(struct seq_file *m, void *unused)
1066 {
1067 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1068 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1069 	int ret = 0;
1070 
1071 	intel_runtime_pm_get(dev_priv);
1072 
1073 	if (IS_GEN5(dev_priv)) {
1074 		u16 rgvswctl = I915_READ16(MEMSWCTL);
1075 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1076 
1077 		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1078 		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1079 		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1080 			   MEMSTAT_VID_SHIFT);
1081 		seq_printf(m, "Current P-state: %d\n",
1082 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1083 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1084 		u32 rpmodectl, freq_sts;
1085 
1086 		mutex_lock(&dev_priv->pcu_lock);
1087 
1088 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1089 		seq_printf(m, "Video Turbo Mode: %s\n",
1090 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1091 		seq_printf(m, "HW control enabled: %s\n",
1092 			   yesno(rpmodectl & GEN6_RP_ENABLE));
1093 		seq_printf(m, "SW control enabled: %s\n",
1094 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1095 				  GEN6_RP_MEDIA_SW_MODE));
1096 
1097 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1098 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1099 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1100 
1101 		seq_printf(m, "actual GPU freq: %d MHz\n",
1102 			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1103 
1104 		seq_printf(m, "current GPU freq: %d MHz\n",
1105 			   intel_gpu_freq(dev_priv, rps->cur_freq));
1106 
1107 		seq_printf(m, "max GPU freq: %d MHz\n",
1108 			   intel_gpu_freq(dev_priv, rps->max_freq));
1109 
1110 		seq_printf(m, "min GPU freq: %d MHz\n",
1111 			   intel_gpu_freq(dev_priv, rps->min_freq));
1112 
1113 		seq_printf(m, "idle GPU freq: %d MHz\n",
1114 			   intel_gpu_freq(dev_priv, rps->idle_freq));
1115 
1116 		seq_printf(m,
1117 			   "efficient (RPe) frequency: %d MHz\n",
1118 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1119 		mutex_unlock(&dev_priv->pcu_lock);
1120 	} else if (INTEL_GEN(dev_priv) >= 6) {
1121 		u32 rp_state_limits;
1122 		u32 gt_perf_status;
1123 		u32 rp_state_cap;
1124 		u32 rpmodectl, rpinclimit, rpdeclimit;
1125 		u32 rpstat, cagf, reqf;
1126 		u32 rpupei, rpcurup, rpprevup;
1127 		u32 rpdownei, rpcurdown, rpprevdown;
1128 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1129 		int max_freq;
1130 
1131 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1132 		if (IS_GEN9_LP(dev_priv)) {
1133 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1134 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1135 		} else {
1136 			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1137 			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1138 		}
1139 
1140 		/* RPSTAT1 is in the GT power well */
1141 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1142 
1143 		reqf = I915_READ(GEN6_RPNSWREQ);
1144 		if (INTEL_GEN(dev_priv) >= 9)
1145 			reqf >>= 23;
1146 		else {
1147 			reqf &= ~GEN6_TURBO_DISABLE;
1148 			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1149 				reqf >>= 24;
1150 			else
1151 				reqf >>= 25;
1152 		}
1153 		reqf = intel_gpu_freq(dev_priv, reqf);
1154 
1155 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1156 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1157 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1158 
1159 		rpstat = I915_READ(GEN6_RPSTAT1);
1160 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1161 		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1162 		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1163 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1164 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1165 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
1166 		cagf = intel_gpu_freq(dev_priv,
1167 				      intel_get_cagf(dev_priv, rpstat));
1168 
1169 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1170 
1171 		if (INTEL_GEN(dev_priv) >= 11) {
1172 			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1173 			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1174 			/*
1175 			 * The equivalent to the PM ISR & IIR cannot be read
1176 			 * without affecting the current state of the system
1177 			 */
1178 			pm_isr = 0;
1179 			pm_iir = 0;
1180 		} else if (INTEL_GEN(dev_priv) >= 8) {
1181 			pm_ier = I915_READ(GEN8_GT_IER(2));
1182 			pm_imr = I915_READ(GEN8_GT_IMR(2));
1183 			pm_isr = I915_READ(GEN8_GT_ISR(2));
1184 			pm_iir = I915_READ(GEN8_GT_IIR(2));
1185 		} else {
1186 			pm_ier = I915_READ(GEN6_PMIER);
1187 			pm_imr = I915_READ(GEN6_PMIMR);
1188 			pm_isr = I915_READ(GEN6_PMISR);
1189 			pm_iir = I915_READ(GEN6_PMIIR);
1190 		}
1191 		pm_mask = I915_READ(GEN6_PMINTRMSK);
1192 
1193 		seq_printf(m, "Video Turbo Mode: %s\n",
1194 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1195 		seq_printf(m, "HW control enabled: %s\n",
1196 			   yesno(rpmodectl & GEN6_RP_ENABLE));
1197 		seq_printf(m, "SW control enabled: %s\n",
1198 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1199 				  GEN6_RP_MEDIA_SW_MODE));
1200 
1201 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1202 			   pm_ier, pm_imr, pm_mask);
1203 		if (INTEL_GEN(dev_priv) <= 10)
1204 			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1205 				   pm_isr, pm_iir);
1206 		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
1207 			   rps->pm_intrmsk_mbz);
1208 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1209 		seq_printf(m, "Render p-state ratio: %d\n",
1210 			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
1211 		seq_printf(m, "Render p-state VID: %d\n",
1212 			   gt_perf_status & 0xff);
1213 		seq_printf(m, "Render p-state limit: %d\n",
1214 			   rp_state_limits & 0xff);
1215 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1216 		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1217 		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1218 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1219 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1220 		seq_printf(m, "CAGF: %dMHz\n", cagf);
1221 		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1222 			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1223 		seq_printf(m, "RP CUR UP: %d (%dus)\n",
1224 			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1225 		seq_printf(m, "RP PREV UP: %d (%dus)\n",
1226 			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
1227 		seq_printf(m, "Up threshold: %d%%\n",
1228 			   rps->power.up_threshold);
1229 
1230 		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1231 			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1232 		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1233 			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1234 		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1235 			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
1236 		seq_printf(m, "Down threshold: %d%%\n",
1237 			   rps->power.down_threshold);
1238 
1239 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
1240 			    rp_state_cap >> 16) & 0xff;
1241 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1242 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1243 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1244 			   intel_gpu_freq(dev_priv, max_freq));
1245 
1246 		max_freq = (rp_state_cap & 0xff00) >> 8;
1247 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1248 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1249 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1250 			   intel_gpu_freq(dev_priv, max_freq));
1251 
1252 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
1253 			    rp_state_cap >> 0) & 0xff;
1254 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1255 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1256 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1257 			   intel_gpu_freq(dev_priv, max_freq));
1258 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1259 			   intel_gpu_freq(dev_priv, rps->max_freq));
1260 
1261 		seq_printf(m, "Current freq: %d MHz\n",
1262 			   intel_gpu_freq(dev_priv, rps->cur_freq));
1263 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
1264 		seq_printf(m, "Idle freq: %d MHz\n",
1265 			   intel_gpu_freq(dev_priv, rps->idle_freq));
1266 		seq_printf(m, "Min freq: %d MHz\n",
1267 			   intel_gpu_freq(dev_priv, rps->min_freq));
1268 		seq_printf(m, "Boost freq: %d MHz\n",
1269 			   intel_gpu_freq(dev_priv, rps->boost_freq));
1270 		seq_printf(m, "Max freq: %d MHz\n",
1271 			   intel_gpu_freq(dev_priv, rps->max_freq));
1272 		seq_printf(m,
1273 			   "efficient (RPe) frequency: %d MHz\n",
1274 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1275 	} else {
1276 		seq_puts(m, "no P-state info available\n");
1277 	}
1278 
1279 	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1280 	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1281 	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1282 
1283 	intel_runtime_pm_put(dev_priv);
1284 	return ret;
1285 }
1286 
1287 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1288 			       struct seq_file *m,
1289 			       struct intel_instdone *instdone)
1290 {
1291 	int slice;
1292 	int subslice;
1293 
1294 	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1295 		   instdone->instdone);
1296 
1297 	if (INTEL_GEN(dev_priv) <= 3)
1298 		return;
1299 
1300 	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1301 		   instdone->slice_common);
1302 
1303 	if (INTEL_GEN(dev_priv) <= 6)
1304 		return;
1305 
1306 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1307 		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1308 			   slice, subslice, instdone->sampler[slice][subslice]);
1309 
1310 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1311 		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1312 			   slice, subslice, instdone->row[slice][subslice]);
1313 }
1314 
1315 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1316 {
1317 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1318 	struct intel_engine_cs *engine;
1319 	u64 acthd[I915_NUM_ENGINES];
1320 	u32 seqno[I915_NUM_ENGINES];
1321 	struct intel_instdone instdone;
1322 	enum intel_engine_id id;
1323 
1324 	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1325 		seq_puts(m, "Wedged\n");
1326 	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1327 		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1328 	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
1329 		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
1330 	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
1331 		seq_puts(m, "Waiter holding struct mutex\n");
1332 	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
1333 		seq_puts(m, "struct_mutex blocked for reset\n");
1334 
1335 	if (!i915_modparams.enable_hangcheck) {
1336 		seq_puts(m, "Hangcheck disabled\n");
1337 		return 0;
1338 	}
1339 
1340 	intel_runtime_pm_get(dev_priv);
1341 
1342 	for_each_engine(engine, dev_priv, id) {
1343 		acthd[id] = intel_engine_get_active_head(engine);
1344 		seqno[id] = intel_engine_get_seqno(engine);
1345 	}
1346 
1347 	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
1348 
1349 	intel_runtime_pm_put(dev_priv);
1350 
1351 	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1352 		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1353 			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1354 					    jiffies));
1355 	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1356 		seq_puts(m, "Hangcheck active, work pending\n");
1357 	else
1358 		seq_puts(m, "Hangcheck inactive\n");
1359 
1360 	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1361 
1362 	for_each_engine(engine, dev_priv, id) {
1363 		struct intel_breadcrumbs *b = &engine->breadcrumbs;
1364 		struct rb_node *rb;
1365 
1366 		seq_printf(m, "%s:\n", engine->name);
1367 		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
1368 			   engine->hangcheck.seqno, seqno[id],
1369 			   intel_engine_last_submit(engine));
1370 		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
1371 			   yesno(intel_engine_has_waiter(engine)),
1372 			   yesno(test_bit(engine->id,
1373 					  &dev_priv->gpu_error.missed_irq_rings)),
1374 			   yesno(engine->hangcheck.stalled),
1375 			   yesno(engine->hangcheck.wedged));
1376 
1377 		spin_lock_irq(&b->rb_lock);
1378 		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
1379 			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
1380 
1381 			seq_printf(m, "\t%s [%d] waiting for %x\n",
1382 				   w->tsk->comm, w->tsk->pid, w->seqno);
1383 		}
1384 		spin_unlock_irq(&b->rb_lock);
1385 
1386 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1387 			   (long long)engine->hangcheck.acthd,
1388 			   (long long)acthd[id]);
1389 		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1390 			   hangcheck_action_to_str(engine->hangcheck.action),
1391 			   engine->hangcheck.action,
1392 			   jiffies_to_msecs(jiffies -
1393 					    engine->hangcheck.action_timestamp));
1394 
1395 		if (engine->id == RCS) {
1396 			seq_puts(m, "\tinstdone read =\n");
1397 
1398 			i915_instdone_info(dev_priv, m, &instdone);
1399 
1400 			seq_puts(m, "\tinstdone accu =\n");
1401 
1402 			i915_instdone_info(dev_priv, m,
1403 					   &engine->hangcheck.instdone);
1404 		}
1405 	}
1406 
1407 	return 0;
1408 }
1409 
1410 static int i915_reset_info(struct seq_file *m, void *unused)
1411 {
1412 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1413 	struct i915_gpu_error *error = &dev_priv->gpu_error;
1414 	struct intel_engine_cs *engine;
1415 	enum intel_engine_id id;
1416 
1417 	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1418 
1419 	for_each_engine(engine, dev_priv, id) {
1420 		seq_printf(m, "%s = %u\n", engine->name,
1421 			   i915_reset_engine_count(error, engine));
1422 	}
1423 
1424 	return 0;
1425 }
1426 
1427 static int ironlake_drpc_info(struct seq_file *m)
1428 {
1429 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1430 	u32 rgvmodectl, rstdbyctl;
1431 	u16 crstandvid;
1432 
1433 	rgvmodectl = I915_READ(MEMMODECTL);
1434 	rstdbyctl = I915_READ(RSTDBYCTL);
1435 	crstandvid = I915_READ16(CRSTANDVID);
1436 
1437 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1438 	seq_printf(m, "Boost freq: %d\n",
1439 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1440 		   MEMMODE_BOOST_FREQ_SHIFT);
1441 	seq_printf(m, "HW control enabled: %s\n",
1442 		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1443 	seq_printf(m, "SW control enabled: %s\n",
1444 		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1445 	seq_printf(m, "Gated voltage change: %s\n",
1446 		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1447 	seq_printf(m, "Starting frequency: P%d\n",
1448 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1449 	seq_printf(m, "Max P-state: P%d\n",
1450 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1451 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1452 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1453 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1454 	seq_printf(m, "Render standby enabled: %s\n",
1455 		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1456 	seq_puts(m, "Current RS state: ");
1457 	switch (rstdbyctl & RSX_STATUS_MASK) {
1458 	case RSX_STATUS_ON:
1459 		seq_puts(m, "on\n");
1460 		break;
1461 	case RSX_STATUS_RC1:
1462 		seq_puts(m, "RC1\n");
1463 		break;
1464 	case RSX_STATUS_RC1E:
1465 		seq_puts(m, "RC1E\n");
1466 		break;
1467 	case RSX_STATUS_RS1:
1468 		seq_puts(m, "RS1\n");
1469 		break;
1470 	case RSX_STATUS_RS2:
1471 		seq_puts(m, "RS2 (RC6)\n");
1472 		break;
1473 	case RSX_STATUS_RS3:
1474 		seq_puts(m, "RC3 (RC6+)\n");
1475 		break;
1476 	default:
1477 		seq_puts(m, "unknown\n");
1478 		break;
1479 	}
1480 
1481 	return 0;
1482 }
1483 
1484 static int i915_forcewake_domains(struct seq_file *m, void *data)
1485 {
1486 	struct drm_i915_private *i915 = node_to_i915(m->private);
1487 	struct intel_uncore_forcewake_domain *fw_domain;
1488 	unsigned int tmp;
1489 
1490 	seq_printf(m, "user.bypass_count = %u\n",
1491 		   i915->uncore.user_forcewake.count);
1492 
1493 	for_each_fw_domain(fw_domain, i915, tmp)
1494 		seq_printf(m, "%s.wake_count = %u\n",
1495 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
1496 			   READ_ONCE(fw_domain->wake_count));
1497 
1498 	return 0;
1499 }
1500 
1501 static void print_rc6_res(struct seq_file *m,
1502 			  const char *title,
1503 			  const i915_reg_t reg)
1504 {
1505 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1506 
1507 	seq_printf(m, "%s %u (%llu us)\n",
1508 		   title, I915_READ(reg),
1509 		   intel_rc6_residency_us(dev_priv, reg));
1510 }
1511 
1512 static int vlv_drpc_info(struct seq_file *m)
1513 {
1514 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1515 	u32 rcctl1, pw_status;
1516 
1517 	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1518 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1519 
1520 	seq_printf(m, "RC6 Enabled: %s\n",
1521 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1522 					GEN6_RC_CTL_EI_MODE(1))));
1523 	seq_printf(m, "Render Power Well: %s\n",
1524 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1525 	seq_printf(m, "Media Power Well: %s\n",
1526 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1527 
1528 	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1529 	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1530 
1531 	return i915_forcewake_domains(m, NULL);
1532 }
1533 
1534 static int gen6_drpc_info(struct seq_file *m)
1535 {
1536 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1537 	u32 gt_core_status, rcctl1, rc6vids = 0;
1538 	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1539 
1540 	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1541 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1542 
1543 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1544 	if (INTEL_GEN(dev_priv) >= 9) {
1545 		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1546 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1547 	}
1548 
1549 	if (INTEL_GEN(dev_priv) <= 7) {
1550 		mutex_lock(&dev_priv->pcu_lock);
1551 		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1552 				       &rc6vids);
1553 		mutex_unlock(&dev_priv->pcu_lock);
1554 	}
1555 
1556 	seq_printf(m, "RC1e Enabled: %s\n",
1557 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1558 	seq_printf(m, "RC6 Enabled: %s\n",
1559 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1560 	if (INTEL_GEN(dev_priv) >= 9) {
1561 		seq_printf(m, "Render Well Gating Enabled: %s\n",
1562 			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1563 		seq_printf(m, "Media Well Gating Enabled: %s\n",
1564 			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1565 	}
1566 	seq_printf(m, "Deep RC6 Enabled: %s\n",
1567 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1568 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1569 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1570 	seq_puts(m, "Current RC state: ");
1571 	switch (gt_core_status & GEN6_RCn_MASK) {
1572 	case GEN6_RC0:
1573 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1574 			seq_puts(m, "Core Power Down\n");
1575 		else
1576 			seq_puts(m, "on\n");
1577 		break;
1578 	case GEN6_RC3:
1579 		seq_puts(m, "RC3\n");
1580 		break;
1581 	case GEN6_RC6:
1582 		seq_puts(m, "RC6\n");
1583 		break;
1584 	case GEN6_RC7:
1585 		seq_puts(m, "RC7\n");
1586 		break;
1587 	default:
1588 		seq_puts(m, "Unknown\n");
1589 		break;
1590 	}
1591 
1592 	seq_printf(m, "Core Power Down: %s\n",
1593 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1594 	if (INTEL_GEN(dev_priv) >= 9) {
1595 		seq_printf(m, "Render Power Well: %s\n",
1596 			(gen9_powergate_status &
1597 			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1598 		seq_printf(m, "Media Power Well: %s\n",
1599 			(gen9_powergate_status &
1600 			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1601 	}
1602 
1603 	/* Not exactly sure what this is */
1604 	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1605 		      GEN6_GT_GFX_RC6_LOCKED);
1606 	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1607 	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1608 	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1609 
1610 	if (INTEL_GEN(dev_priv) <= 7) {
1611 		seq_printf(m, "RC6   voltage: %dmV\n",
1612 			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1613 		seq_printf(m, "RC6+  voltage: %dmV\n",
1614 			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1615 		seq_printf(m, "RC6++ voltage: %dmV\n",
1616 			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1617 	}
1618 
1619 	return i915_forcewake_domains(m, NULL);
1620 }
1621 
1622 static int i915_drpc_info(struct seq_file *m, void *unused)
1623 {
1624 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1625 	int err;
1626 
1627 	intel_runtime_pm_get(dev_priv);
1628 
1629 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1630 		err = vlv_drpc_info(m);
1631 	else if (INTEL_GEN(dev_priv) >= 6)
1632 		err = gen6_drpc_info(m);
1633 	else
1634 		err = ironlake_drpc_info(m);
1635 
1636 	intel_runtime_pm_put(dev_priv);
1637 
1638 	return err;
1639 }
1640 
1641 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1642 {
1643 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1644 
1645 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1646 		   dev_priv->fb_tracking.busy_bits);
1647 
1648 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1649 		   dev_priv->fb_tracking.flip_bits);
1650 
1651 	return 0;
1652 }
1653 
1654 static int i915_fbc_status(struct seq_file *m, void *unused)
1655 {
1656 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1657 	struct intel_fbc *fbc = &dev_priv->fbc;
1658 
1659 	if (!HAS_FBC(dev_priv))
1660 		return -ENODEV;
1661 
1662 	intel_runtime_pm_get(dev_priv);
1663 	mutex_lock(&fbc->lock);
1664 
1665 	if (intel_fbc_is_active(dev_priv))
1666 		seq_puts(m, "FBC enabled\n");
1667 	else
1668 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1669 
1670 	if (intel_fbc_is_active(dev_priv)) {
1671 		u32 mask;
1672 
1673 		if (INTEL_GEN(dev_priv) >= 8)
1674 			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1675 		else if (INTEL_GEN(dev_priv) >= 7)
1676 			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1677 		else if (INTEL_GEN(dev_priv) >= 5)
1678 			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1679 		else if (IS_G4X(dev_priv))
1680 			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1681 		else
1682 			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1683 							FBC_STAT_COMPRESSED);
1684 
1685 		seq_printf(m, "Compressing: %s\n", yesno(mask));
1686 	}
1687 
1688 	mutex_unlock(&fbc->lock);
1689 	intel_runtime_pm_put(dev_priv);
1690 
1691 	return 0;
1692 }
1693 
1694 static int i915_fbc_false_color_get(void *data, u64 *val)
1695 {
1696 	struct drm_i915_private *dev_priv = data;
1697 
1698 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1699 		return -ENODEV;
1700 
1701 	*val = dev_priv->fbc.false_color;
1702 
1703 	return 0;
1704 }
1705 
1706 static int i915_fbc_false_color_set(void *data, u64 val)
1707 {
1708 	struct drm_i915_private *dev_priv = data;
1709 	u32 reg;
1710 
1711 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1712 		return -ENODEV;
1713 
1714 	mutex_lock(&dev_priv->fbc.lock);
1715 
1716 	reg = I915_READ(ILK_DPFC_CONTROL);
1717 	dev_priv->fbc.false_color = val;
1718 
1719 	I915_WRITE(ILK_DPFC_CONTROL, val ?
1720 		   (reg | FBC_CTL_FALSE_COLOR) :
1721 		   (reg & ~FBC_CTL_FALSE_COLOR));
1722 
1723 	mutex_unlock(&dev_priv->fbc.lock);
1724 	return 0;
1725 }
1726 
1727 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1728 			i915_fbc_false_color_get, i915_fbc_false_color_set,
1729 			"%llu\n");
1730 
1731 static int i915_ips_status(struct seq_file *m, void *unused)
1732 {
1733 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1734 
1735 	if (!HAS_IPS(dev_priv))
1736 		return -ENODEV;
1737 
1738 	intel_runtime_pm_get(dev_priv);
1739 
1740 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1741 		   yesno(i915_modparams.enable_ips));
1742 
1743 	if (INTEL_GEN(dev_priv) >= 8) {
1744 		seq_puts(m, "Currently: unknown\n");
1745 	} else {
1746 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1747 			seq_puts(m, "Currently: enabled\n");
1748 		else
1749 			seq_puts(m, "Currently: disabled\n");
1750 	}
1751 
1752 	intel_runtime_pm_put(dev_priv);
1753 
1754 	return 0;
1755 }
1756 
1757 static int i915_sr_status(struct seq_file *m, void *unused)
1758 {
1759 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1760 	bool sr_enabled = false;
1761 
1762 	intel_runtime_pm_get(dev_priv);
1763 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1764 
1765 	if (INTEL_GEN(dev_priv) >= 9)
1766 		/* no global SR status; inspect per-plane WM */;
1767 	else if (HAS_PCH_SPLIT(dev_priv))
1768 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1769 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1770 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1771 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1772 	else if (IS_I915GM(dev_priv))
1773 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1774 	else if (IS_PINEVIEW(dev_priv))
1775 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1776 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1777 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1778 
1779 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1780 	intel_runtime_pm_put(dev_priv);
1781 
1782 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1783 
1784 	return 0;
1785 }
1786 
1787 static int i915_emon_status(struct seq_file *m, void *unused)
1788 {
1789 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1790 	struct drm_device *dev = &dev_priv->drm;
1791 	unsigned long temp, chipset, gfx;
1792 	int ret;
1793 
1794 	if (!IS_GEN5(dev_priv))
1795 		return -ENODEV;
1796 
1797 	intel_runtime_pm_get(dev_priv);
1798 
1799 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1800 	if (ret)
1801 		return ret;
1802 
1803 	temp = i915_mch_val(dev_priv);
1804 	chipset = i915_chipset_val(dev_priv);
1805 	gfx = i915_gfx_val(dev_priv);
1806 	mutex_unlock(&dev->struct_mutex);
1807 
1808 	seq_printf(m, "GMCH temp: %ld\n", temp);
1809 	seq_printf(m, "Chipset power: %ld\n", chipset);
1810 	seq_printf(m, "GFX power: %ld\n", gfx);
1811 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1812 
1813 	intel_runtime_pm_put(dev_priv);
1814 
1815 	return 0;
1816 }
1817 
1818 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1819 {
1820 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1821 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1822 	unsigned int max_gpu_freq, min_gpu_freq;
1823 	int gpu_freq, ia_freq;
1824 	int ret;
1825 
1826 	if (!HAS_LLC(dev_priv))
1827 		return -ENODEV;
1828 
1829 	intel_runtime_pm_get(dev_priv);
1830 
1831 	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
1832 	if (ret)
1833 		goto out;
1834 
1835 	min_gpu_freq = rps->min_freq;
1836 	max_gpu_freq = rps->max_freq;
1837 	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1838 		/* Convert GT frequency to 50 HZ units */
1839 		min_gpu_freq /= GEN9_FREQ_SCALER;
1840 		max_gpu_freq /= GEN9_FREQ_SCALER;
1841 	}
1842 
1843 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1844 
1845 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1846 		ia_freq = gpu_freq;
1847 		sandybridge_pcode_read(dev_priv,
1848 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1849 				       &ia_freq);
1850 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1851 			   intel_gpu_freq(dev_priv, (gpu_freq *
1852 						     (IS_GEN9_BC(dev_priv) ||
1853 						      INTEL_GEN(dev_priv) >= 10 ?
1854 						      GEN9_FREQ_SCALER : 1))),
1855 			   ((ia_freq >> 0) & 0xff) * 100,
1856 			   ((ia_freq >> 8) & 0xff) * 100);
1857 	}
1858 
1859 	mutex_unlock(&dev_priv->pcu_lock);
1860 
1861 out:
1862 	intel_runtime_pm_put(dev_priv);
1863 	return ret;
1864 }
1865 
1866 static int i915_opregion(struct seq_file *m, void *unused)
1867 {
1868 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1869 	struct drm_device *dev = &dev_priv->drm;
1870 	struct intel_opregion *opregion = &dev_priv->opregion;
1871 	int ret;
1872 
1873 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1874 	if (ret)
1875 		goto out;
1876 
1877 	if (opregion->header)
1878 		seq_write(m, opregion->header, OPREGION_SIZE);
1879 
1880 	mutex_unlock(&dev->struct_mutex);
1881 
1882 out:
1883 	return 0;
1884 }
1885 
1886 static int i915_vbt(struct seq_file *m, void *unused)
1887 {
1888 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1889 
1890 	if (opregion->vbt)
1891 		seq_write(m, opregion->vbt, opregion->vbt_size);
1892 
1893 	return 0;
1894 }
1895 
1896 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1897 {
1898 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1899 	struct drm_device *dev = &dev_priv->drm;
1900 	struct intel_framebuffer *fbdev_fb = NULL;
1901 	struct drm_framebuffer *drm_fb;
1902 	int ret;
1903 
1904 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1905 	if (ret)
1906 		return ret;
1907 
1908 #ifdef CONFIG_DRM_FBDEV_EMULATION
1909 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1910 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1911 
1912 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1913 			   fbdev_fb->base.width,
1914 			   fbdev_fb->base.height,
1915 			   fbdev_fb->base.format->depth,
1916 			   fbdev_fb->base.format->cpp[0] * 8,
1917 			   fbdev_fb->base.modifier,
1918 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1919 		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1920 		seq_putc(m, '\n');
1921 	}
1922 #endif
1923 
1924 	mutex_lock(&dev->mode_config.fb_lock);
1925 	drm_for_each_fb(drm_fb, dev) {
1926 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1927 		if (fb == fbdev_fb)
1928 			continue;
1929 
1930 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1931 			   fb->base.width,
1932 			   fb->base.height,
1933 			   fb->base.format->depth,
1934 			   fb->base.format->cpp[0] * 8,
1935 			   fb->base.modifier,
1936 			   drm_framebuffer_read_refcount(&fb->base));
1937 		describe_obj(m, intel_fb_obj(&fb->base));
1938 		seq_putc(m, '\n');
1939 	}
1940 	mutex_unlock(&dev->mode_config.fb_lock);
1941 	mutex_unlock(&dev->struct_mutex);
1942 
1943 	return 0;
1944 }
1945 
1946 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1947 {
1948 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1949 		   ring->space, ring->head, ring->tail, ring->emit);
1950 }
1951 
1952 static int i915_context_status(struct seq_file *m, void *unused)
1953 {
1954 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1955 	struct drm_device *dev = &dev_priv->drm;
1956 	struct intel_engine_cs *engine;
1957 	struct i915_gem_context *ctx;
1958 	enum intel_engine_id id;
1959 	int ret;
1960 
1961 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1962 	if (ret)
1963 		return ret;
1964 
1965 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1966 		seq_puts(m, "HW context ");
1967 		if (!list_empty(&ctx->hw_id_link))
1968 			seq_printf(m, "%x [pin %u]", ctx->hw_id,
1969 				   atomic_read(&ctx->hw_id_pin_count));
1970 		if (ctx->pid) {
1971 			struct task_struct *task;
1972 
1973 			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1974 			if (task) {
1975 				seq_printf(m, "(%s [%d]) ",
1976 					   task->comm, task->pid);
1977 				put_task_struct(task);
1978 			}
1979 		} else if (IS_ERR(ctx->file_priv)) {
1980 			seq_puts(m, "(deleted) ");
1981 		} else {
1982 			seq_puts(m, "(kernel) ");
1983 		}
1984 
1985 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1986 		seq_putc(m, '\n');
1987 
1988 		for_each_engine(engine, dev_priv, id) {
1989 			struct intel_context *ce =
1990 				to_intel_context(ctx, engine);
1991 
1992 			seq_printf(m, "%s: ", engine->name);
1993 			if (ce->state)
1994 				describe_obj(m, ce->state->obj);
1995 			if (ce->ring)
1996 				describe_ctx_ring(m, ce->ring);
1997 			seq_putc(m, '\n');
1998 		}
1999 
2000 		seq_putc(m, '\n');
2001 	}
2002 
2003 	mutex_unlock(&dev->struct_mutex);
2004 
2005 	return 0;
2006 }
2007 
2008 static const char *swizzle_string(unsigned swizzle)
2009 {
2010 	switch (swizzle) {
2011 	case I915_BIT_6_SWIZZLE_NONE:
2012 		return "none";
2013 	case I915_BIT_6_SWIZZLE_9:
2014 		return "bit9";
2015 	case I915_BIT_6_SWIZZLE_9_10:
2016 		return "bit9/bit10";
2017 	case I915_BIT_6_SWIZZLE_9_11:
2018 		return "bit9/bit11";
2019 	case I915_BIT_6_SWIZZLE_9_10_11:
2020 		return "bit9/bit10/bit11";
2021 	case I915_BIT_6_SWIZZLE_9_17:
2022 		return "bit9/bit17";
2023 	case I915_BIT_6_SWIZZLE_9_10_17:
2024 		return "bit9/bit10/bit17";
2025 	case I915_BIT_6_SWIZZLE_UNKNOWN:
2026 		return "unknown";
2027 	}
2028 
2029 	return "bug";
2030 }
2031 
2032 static int i915_swizzle_info(struct seq_file *m, void *data)
2033 {
2034 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2035 
2036 	intel_runtime_pm_get(dev_priv);
2037 
2038 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2039 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2040 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2041 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2042 
2043 	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
2044 		seq_printf(m, "DDC = 0x%08x\n",
2045 			   I915_READ(DCC));
2046 		seq_printf(m, "DDC2 = 0x%08x\n",
2047 			   I915_READ(DCC2));
2048 		seq_printf(m, "C0DRB3 = 0x%04x\n",
2049 			   I915_READ16(C0DRB3));
2050 		seq_printf(m, "C1DRB3 = 0x%04x\n",
2051 			   I915_READ16(C1DRB3));
2052 	} else if (INTEL_GEN(dev_priv) >= 6) {
2053 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2054 			   I915_READ(MAD_DIMM_C0));
2055 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2056 			   I915_READ(MAD_DIMM_C1));
2057 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2058 			   I915_READ(MAD_DIMM_C2));
2059 		seq_printf(m, "TILECTL = 0x%08x\n",
2060 			   I915_READ(TILECTL));
2061 		if (INTEL_GEN(dev_priv) >= 8)
2062 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2063 				   I915_READ(GAMTARBMODE));
2064 		else
2065 			seq_printf(m, "ARB_MODE = 0x%08x\n",
2066 				   I915_READ(ARB_MODE));
2067 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2068 			   I915_READ(DISP_ARB_CTL));
2069 	}
2070 
2071 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2072 		seq_puts(m, "L-shaped memory detected\n");
2073 
2074 	intel_runtime_pm_put(dev_priv);
2075 
2076 	return 0;
2077 }
2078 
2079 static int per_file_ctx(int id, void *ptr, void *data)
2080 {
2081 	struct i915_gem_context *ctx = ptr;
2082 	struct seq_file *m = data;
2083 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2084 
2085 	if (!ppgtt) {
2086 		seq_printf(m, "  no ppgtt for context %d\n",
2087 			   ctx->user_handle);
2088 		return 0;
2089 	}
2090 
2091 	if (i915_gem_context_is_default(ctx))
2092 		seq_puts(m, "  default context:\n");
2093 	else
2094 		seq_printf(m, "  context %d:\n", ctx->user_handle);
2095 	ppgtt->debug_dump(ppgtt, m);
2096 
2097 	return 0;
2098 }
2099 
2100 static void gen8_ppgtt_info(struct seq_file *m,
2101 			    struct drm_i915_private *dev_priv)
2102 {
2103 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2104 	struct intel_engine_cs *engine;
2105 	enum intel_engine_id id;
2106 	int i;
2107 
2108 	if (!ppgtt)
2109 		return;
2110 
2111 	for_each_engine(engine, dev_priv, id) {
2112 		seq_printf(m, "%s\n", engine->name);
2113 		for (i = 0; i < 4; i++) {
2114 			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2115 			pdp <<= 32;
2116 			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2117 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2118 		}
2119 	}
2120 }
2121 
2122 static void gen6_ppgtt_info(struct seq_file *m,
2123 			    struct drm_i915_private *dev_priv)
2124 {
2125 	struct intel_engine_cs *engine;
2126 	enum intel_engine_id id;
2127 
2128 	if (IS_GEN6(dev_priv))
2129 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2130 
2131 	for_each_engine(engine, dev_priv, id) {
2132 		seq_printf(m, "%s\n", engine->name);
2133 		if (IS_GEN7(dev_priv))
2134 			seq_printf(m, "GFX_MODE: 0x%08x\n",
2135 				   I915_READ(RING_MODE_GEN7(engine)));
2136 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2137 			   I915_READ(RING_PP_DIR_BASE(engine)));
2138 		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2139 			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
2140 		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2141 			   I915_READ(RING_PP_DIR_DCLV(engine)));
2142 	}
2143 	if (dev_priv->mm.aliasing_ppgtt) {
2144 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2145 
2146 		seq_puts(m, "aliasing PPGTT:\n");
2147 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2148 
2149 		ppgtt->debug_dump(ppgtt, m);
2150 	}
2151 
2152 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2153 }
2154 
2155 static int i915_ppgtt_info(struct seq_file *m, void *data)
2156 {
2157 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2158 	struct drm_device *dev = &dev_priv->drm;
2159 	struct drm_file *file;
2160 	int ret;
2161 
2162 	mutex_lock(&dev->filelist_mutex);
2163 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2164 	if (ret)
2165 		goto out_unlock;
2166 
2167 	intel_runtime_pm_get(dev_priv);
2168 
2169 	if (INTEL_GEN(dev_priv) >= 8)
2170 		gen8_ppgtt_info(m, dev_priv);
2171 	else if (INTEL_GEN(dev_priv) >= 6)
2172 		gen6_ppgtt_info(m, dev_priv);
2173 
2174 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2175 		struct drm_i915_file_private *file_priv = file->driver_priv;
2176 		struct task_struct *task;
2177 
2178 		task = get_pid_task(file->pid, PIDTYPE_PID);
2179 		if (!task) {
2180 			ret = -ESRCH;
2181 			goto out_rpm;
2182 		}
2183 		seq_printf(m, "\nproc: %s\n", task->comm);
2184 		put_task_struct(task);
2185 		idr_for_each(&file_priv->context_idr, per_file_ctx,
2186 			     (void *)(unsigned long)m);
2187 	}
2188 
2189 out_rpm:
2190 	intel_runtime_pm_put(dev_priv);
2191 	mutex_unlock(&dev->struct_mutex);
2192 out_unlock:
2193 	mutex_unlock(&dev->filelist_mutex);
2194 	return ret;
2195 }
2196 
2197 static int count_irq_waiters(struct drm_i915_private *i915)
2198 {
2199 	struct intel_engine_cs *engine;
2200 	enum intel_engine_id id;
2201 	int count = 0;
2202 
2203 	for_each_engine(engine, i915, id)
2204 		count += intel_engine_has_waiter(engine);
2205 
2206 	return count;
2207 }
2208 
2209 static const char *rps_power_to_str(unsigned int power)
2210 {
2211 	static const char * const strings[] = {
2212 		[LOW_POWER] = "low power",
2213 		[BETWEEN] = "mixed",
2214 		[HIGH_POWER] = "high power",
2215 	};
2216 
2217 	if (power >= ARRAY_SIZE(strings) || !strings[power])
2218 		return "unknown";
2219 
2220 	return strings[power];
2221 }
2222 
2223 static int i915_rps_boost_info(struct seq_file *m, void *data)
2224 {
2225 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2226 	struct drm_device *dev = &dev_priv->drm;
2227 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
2228 	u32 act_freq = rps->cur_freq;
2229 	struct drm_file *file;
2230 
2231 	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
2232 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2233 			mutex_lock(&dev_priv->pcu_lock);
2234 			act_freq = vlv_punit_read(dev_priv,
2235 						  PUNIT_REG_GPU_FREQ_STS);
2236 			act_freq = (act_freq >> 8) & 0xff;
2237 			mutex_unlock(&dev_priv->pcu_lock);
2238 		} else {
2239 			act_freq = intel_get_cagf(dev_priv,
2240 						  I915_READ(GEN6_RPSTAT1));
2241 		}
2242 		intel_runtime_pm_put(dev_priv);
2243 	}
2244 
2245 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2246 	seq_printf(m, "GPU busy? %s [%d requests]\n",
2247 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2248 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2249 	seq_printf(m, "Boosts outstanding? %d\n",
2250 		   atomic_read(&rps->num_waiters));
2251 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
2252 	seq_printf(m, "Frequency requested %d, actual %d\n",
2253 		   intel_gpu_freq(dev_priv, rps->cur_freq),
2254 		   intel_gpu_freq(dev_priv, act_freq));
2255 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2256 		   intel_gpu_freq(dev_priv, rps->min_freq),
2257 		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2258 		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2259 		   intel_gpu_freq(dev_priv, rps->max_freq));
2260 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
2261 		   intel_gpu_freq(dev_priv, rps->idle_freq),
2262 		   intel_gpu_freq(dev_priv, rps->efficient_freq),
2263 		   intel_gpu_freq(dev_priv, rps->boost_freq));
2264 
2265 	mutex_lock(&dev->filelist_mutex);
2266 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2267 		struct drm_i915_file_private *file_priv = file->driver_priv;
2268 		struct task_struct *task;
2269 
2270 		rcu_read_lock();
2271 		task = pid_task(file->pid, PIDTYPE_PID);
2272 		seq_printf(m, "%s [%d]: %d boosts\n",
2273 			   task ? task->comm : "<unknown>",
2274 			   task ? task->pid : -1,
2275 			   atomic_read(&file_priv->rps_client.boosts));
2276 		rcu_read_unlock();
2277 	}
2278 	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2279 		   atomic_read(&rps->boosts));
2280 	mutex_unlock(&dev->filelist_mutex);
2281 
2282 	if (INTEL_GEN(dev_priv) >= 6 &&
2283 	    rps->enabled &&
2284 	    dev_priv->gt.active_requests) {
2285 		u32 rpup, rpupei;
2286 		u32 rpdown, rpdownei;
2287 
2288 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2289 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2290 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2291 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2292 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2293 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2294 
2295 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2296 			   rps_power_to_str(rps->power.mode));
2297 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
2298 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
2299 			   rps->power.up_threshold);
2300 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
2301 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2302 			   rps->power.down_threshold);
2303 	} else {
2304 		seq_puts(m, "\nRPS Autotuning inactive\n");
2305 	}
2306 
2307 	return 0;
2308 }
2309 
2310 static int i915_llc(struct seq_file *m, void *data)
2311 {
2312 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2313 	const bool edram = INTEL_GEN(dev_priv) > 8;
2314 
2315 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2316 	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2317 		   intel_uncore_edram_size(dev_priv)/1024/1024);
2318 
2319 	return 0;
2320 }
2321 
2322 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2323 {
2324 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2325 	struct drm_printer p;
2326 
2327 	if (!HAS_HUC(dev_priv))
2328 		return -ENODEV;
2329 
2330 	p = drm_seq_file_printer(m);
2331 	intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2332 
2333 	intel_runtime_pm_get(dev_priv);
2334 	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2335 	intel_runtime_pm_put(dev_priv);
2336 
2337 	return 0;
2338 }
2339 
2340 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2341 {
2342 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2343 	struct drm_printer p;
2344 	u32 tmp, i;
2345 
2346 	if (!HAS_GUC(dev_priv))
2347 		return -ENODEV;
2348 
2349 	p = drm_seq_file_printer(m);
2350 	intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2351 
2352 	intel_runtime_pm_get(dev_priv);
2353 
2354 	tmp = I915_READ(GUC_STATUS);
2355 
2356 	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2357 	seq_printf(m, "\tBootrom status = 0x%x\n",
2358 		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2359 	seq_printf(m, "\tuKernel status = 0x%x\n",
2360 		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2361 	seq_printf(m, "\tMIA Core status = 0x%x\n",
2362 		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2363 	seq_puts(m, "\nScratch registers:\n");
2364 	for (i = 0; i < 16; i++)
2365 		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2366 
2367 	intel_runtime_pm_put(dev_priv);
2368 
2369 	return 0;
2370 }
2371 
2372 static const char *
2373 stringify_guc_log_type(enum guc_log_buffer_type type)
2374 {
2375 	switch (type) {
2376 	case GUC_ISR_LOG_BUFFER:
2377 		return "ISR";
2378 	case GUC_DPC_LOG_BUFFER:
2379 		return "DPC";
2380 	case GUC_CRASH_DUMP_LOG_BUFFER:
2381 		return "CRASH";
2382 	default:
2383 		MISSING_CASE(type);
2384 	}
2385 
2386 	return "";
2387 }
2388 
2389 static void i915_guc_log_info(struct seq_file *m,
2390 			      struct drm_i915_private *dev_priv)
2391 {
2392 	struct intel_guc_log *log = &dev_priv->guc.log;
2393 	enum guc_log_buffer_type type;
2394 
2395 	if (!intel_guc_log_relay_enabled(log)) {
2396 		seq_puts(m, "GuC log relay disabled\n");
2397 		return;
2398 	}
2399 
2400 	seq_puts(m, "GuC logging stats:\n");
2401 
2402 	seq_printf(m, "\tRelay full count: %u\n",
2403 		   log->relay.full_count);
2404 
2405 	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2406 		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2407 			   stringify_guc_log_type(type),
2408 			   log->stats[type].flush,
2409 			   log->stats[type].sampled_overflow);
2410 	}
2411 }
2412 
2413 static void i915_guc_client_info(struct seq_file *m,
2414 				 struct drm_i915_private *dev_priv,
2415 				 struct intel_guc_client *client)
2416 {
2417 	struct intel_engine_cs *engine;
2418 	enum intel_engine_id id;
2419 	uint64_t tot = 0;
2420 
2421 	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2422 		client->priority, client->stage_id, client->proc_desc_offset);
2423 	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2424 		client->doorbell_id, client->doorbell_offset);
2425 
2426 	for_each_engine(engine, dev_priv, id) {
2427 		u64 submissions = client->submissions[id];
2428 		tot += submissions;
2429 		seq_printf(m, "\tSubmissions: %llu %s\n",
2430 				submissions, engine->name);
2431 	}
2432 	seq_printf(m, "\tTotal: %llu\n", tot);
2433 }
2434 
2435 static int i915_guc_info(struct seq_file *m, void *data)
2436 {
2437 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2438 	const struct intel_guc *guc = &dev_priv->guc;
2439 
2440 	if (!USES_GUC(dev_priv))
2441 		return -ENODEV;
2442 
2443 	i915_guc_log_info(m, dev_priv);
2444 
2445 	if (!USES_GUC_SUBMISSION(dev_priv))
2446 		return 0;
2447 
2448 	GEM_BUG_ON(!guc->execbuf_client);
2449 
2450 	seq_printf(m, "\nDoorbell map:\n");
2451 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2452 	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2453 
2454 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2455 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2456 	if (guc->preempt_client) {
2457 		seq_printf(m, "\nGuC preempt client @ %p:\n",
2458 			   guc->preempt_client);
2459 		i915_guc_client_info(m, dev_priv, guc->preempt_client);
2460 	}
2461 
2462 	/* Add more as required ... */
2463 
2464 	return 0;
2465 }
2466 
2467 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2468 {
2469 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2470 	const struct intel_guc *guc = &dev_priv->guc;
2471 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2472 	struct intel_guc_client *client = guc->execbuf_client;
2473 	unsigned int tmp;
2474 	int index;
2475 
2476 	if (!USES_GUC_SUBMISSION(dev_priv))
2477 		return -ENODEV;
2478 
2479 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2480 		struct intel_engine_cs *engine;
2481 
2482 		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2483 			continue;
2484 
2485 		seq_printf(m, "GuC stage descriptor %u:\n", index);
2486 		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2487 		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2488 		seq_printf(m, "\tPriority: %d\n", desc->priority);
2489 		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2490 		seq_printf(m, "\tEngines used: 0x%x\n",
2491 			   desc->engines_used);
2492 		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2493 			   desc->db_trigger_phy,
2494 			   desc->db_trigger_cpu,
2495 			   desc->db_trigger_uk);
2496 		seq_printf(m, "\tProcess descriptor: 0x%x\n",
2497 			   desc->process_desc);
2498 		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2499 			   desc->wq_addr, desc->wq_size);
2500 		seq_putc(m, '\n');
2501 
2502 		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2503 			u32 guc_engine_id = engine->guc_id;
2504 			struct guc_execlist_context *lrc =
2505 						&desc->lrc[guc_engine_id];
2506 
2507 			seq_printf(m, "\t%s LRC:\n", engine->name);
2508 			seq_printf(m, "\t\tContext desc: 0x%x\n",
2509 				   lrc->context_desc);
2510 			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2511 			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2512 			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2513 			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2514 			seq_putc(m, '\n');
2515 		}
2516 	}
2517 
2518 	return 0;
2519 }
2520 
2521 static int i915_guc_log_dump(struct seq_file *m, void *data)
2522 {
2523 	struct drm_info_node *node = m->private;
2524 	struct drm_i915_private *dev_priv = node_to_i915(node);
2525 	bool dump_load_err = !!node->info_ent->data;
2526 	struct drm_i915_gem_object *obj = NULL;
2527 	u32 *log;
2528 	int i = 0;
2529 
2530 	if (!HAS_GUC(dev_priv))
2531 		return -ENODEV;
2532 
2533 	if (dump_load_err)
2534 		obj = dev_priv->guc.load_err_log;
2535 	else if (dev_priv->guc.log.vma)
2536 		obj = dev_priv->guc.log.vma->obj;
2537 
2538 	if (!obj)
2539 		return 0;
2540 
2541 	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2542 	if (IS_ERR(log)) {
2543 		DRM_DEBUG("Failed to pin object\n");
2544 		seq_puts(m, "(log data unaccessible)\n");
2545 		return PTR_ERR(log);
2546 	}
2547 
2548 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2549 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2550 			   *(log + i), *(log + i + 1),
2551 			   *(log + i + 2), *(log + i + 3));
2552 
2553 	seq_putc(m, '\n');
2554 
2555 	i915_gem_object_unpin_map(obj);
2556 
2557 	return 0;
2558 }
2559 
2560 static int i915_guc_log_level_get(void *data, u64 *val)
2561 {
2562 	struct drm_i915_private *dev_priv = data;
2563 
2564 	if (!USES_GUC(dev_priv))
2565 		return -ENODEV;
2566 
2567 	*val = intel_guc_log_get_level(&dev_priv->guc.log);
2568 
2569 	return 0;
2570 }
2571 
2572 static int i915_guc_log_level_set(void *data, u64 val)
2573 {
2574 	struct drm_i915_private *dev_priv = data;
2575 
2576 	if (!USES_GUC(dev_priv))
2577 		return -ENODEV;
2578 
2579 	return intel_guc_log_set_level(&dev_priv->guc.log, val);
2580 }
2581 
2582 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2583 			i915_guc_log_level_get, i915_guc_log_level_set,
2584 			"%lld\n");
2585 
2586 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2587 {
2588 	struct drm_i915_private *dev_priv = inode->i_private;
2589 
2590 	if (!USES_GUC(dev_priv))
2591 		return -ENODEV;
2592 
2593 	file->private_data = &dev_priv->guc.log;
2594 
2595 	return intel_guc_log_relay_open(&dev_priv->guc.log);
2596 }
2597 
2598 static ssize_t
2599 i915_guc_log_relay_write(struct file *filp,
2600 			 const char __user *ubuf,
2601 			 size_t cnt,
2602 			 loff_t *ppos)
2603 {
2604 	struct intel_guc_log *log = filp->private_data;
2605 
2606 	intel_guc_log_relay_flush(log);
2607 
2608 	return cnt;
2609 }
2610 
2611 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2612 {
2613 	struct drm_i915_private *dev_priv = inode->i_private;
2614 
2615 	intel_guc_log_relay_close(&dev_priv->guc.log);
2616 
2617 	return 0;
2618 }
2619 
2620 static const struct file_operations i915_guc_log_relay_fops = {
2621 	.owner = THIS_MODULE,
2622 	.open = i915_guc_log_relay_open,
2623 	.write = i915_guc_log_relay_write,
2624 	.release = i915_guc_log_relay_release,
2625 };
2626 
2627 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2628 {
2629 	u8 val;
2630 	static const char * const sink_status[] = {
2631 		"inactive",
2632 		"transition to active, capture and display",
2633 		"active, display from RFB",
2634 		"active, capture and display on sink device timings",
2635 		"transition to inactive, capture and display, timing re-sync",
2636 		"reserved",
2637 		"reserved",
2638 		"sink internal error",
2639 	};
2640 	struct drm_connector *connector = m->private;
2641 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2642 	struct intel_dp *intel_dp =
2643 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2644 	int ret;
2645 
2646 	if (!CAN_PSR(dev_priv)) {
2647 		seq_puts(m, "PSR Unsupported\n");
2648 		return -ENODEV;
2649 	}
2650 
2651 	if (connector->status != connector_status_connected)
2652 		return -ENODEV;
2653 
2654 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2655 
2656 	if (ret == 1) {
2657 		const char *str = "unknown";
2658 
2659 		val &= DP_PSR_SINK_STATE_MASK;
2660 		if (val < ARRAY_SIZE(sink_status))
2661 			str = sink_status[val];
2662 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2663 	} else {
2664 		return ret;
2665 	}
2666 
2667 	return 0;
2668 }
2669 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2670 
2671 static void
2672 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2673 {
2674 	u32 val, psr_status;
2675 
2676 	if (dev_priv->psr.psr2_enabled) {
2677 		static const char * const live_status[] = {
2678 			"IDLE",
2679 			"CAPTURE",
2680 			"CAPTURE_FS",
2681 			"SLEEP",
2682 			"BUFON_FW",
2683 			"ML_UP",
2684 			"SU_STANDBY",
2685 			"FAST_SLEEP",
2686 			"DEEP_SLEEP",
2687 			"BUF_ON",
2688 			"TG_ON"
2689 		};
2690 		psr_status = I915_READ(EDP_PSR2_STATUS);
2691 		val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2692 			EDP_PSR2_STATUS_STATE_SHIFT;
2693 		if (val < ARRAY_SIZE(live_status)) {
2694 			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2695 				   psr_status, live_status[val]);
2696 			return;
2697 		}
2698 	} else {
2699 		static const char * const live_status[] = {
2700 			"IDLE",
2701 			"SRDONACK",
2702 			"SRDENT",
2703 			"BUFOFF",
2704 			"BUFON",
2705 			"AUXACK",
2706 			"SRDOFFACK",
2707 			"SRDENT_ON",
2708 		};
2709 		psr_status = I915_READ(EDP_PSR_STATUS);
2710 		val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2711 			EDP_PSR_STATUS_STATE_SHIFT;
2712 		if (val < ARRAY_SIZE(live_status)) {
2713 			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2714 				   psr_status, live_status[val]);
2715 			return;
2716 		}
2717 	}
2718 
2719 	seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
2720 }
2721 
2722 static int i915_edp_psr_status(struct seq_file *m, void *data)
2723 {
2724 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2725 	u32 psrperf = 0;
2726 	bool enabled = false;
2727 	bool sink_support;
2728 
2729 	if (!HAS_PSR(dev_priv))
2730 		return -ENODEV;
2731 
2732 	sink_support = dev_priv->psr.sink_support;
2733 	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
2734 	if (!sink_support)
2735 		return 0;
2736 
2737 	intel_runtime_pm_get(dev_priv);
2738 
2739 	mutex_lock(&dev_priv->psr.lock);
2740 	seq_printf(m, "PSR mode: %s\n",
2741 		   dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
2742 	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
2743 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2744 		   dev_priv->psr.busy_frontbuffer_bits);
2745 
2746 	if (dev_priv->psr.psr2_enabled)
2747 		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2748 	else
2749 		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2750 
2751 	seq_printf(m, "Main link in standby mode: %s\n",
2752 		   yesno(dev_priv->psr.link_standby));
2753 
2754 	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
2755 
2756 	/*
2757 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2758 	 */
2759 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2760 		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2761 			EDP_PSR_PERF_CNT_MASK;
2762 
2763 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2764 	}
2765 
2766 	psr_source_status(dev_priv, m);
2767 	mutex_unlock(&dev_priv->psr.lock);
2768 
2769 	if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
2770 		seq_printf(m, "Last attempted entry at: %lld\n",
2771 			   dev_priv->psr.last_entry_attempt);
2772 		seq_printf(m, "Last exit at: %lld\n",
2773 			   dev_priv->psr.last_exit);
2774 	}
2775 
2776 	intel_runtime_pm_put(dev_priv);
2777 	return 0;
2778 }
2779 
2780 static int
2781 i915_edp_psr_debug_set(void *data, u64 val)
2782 {
2783 	struct drm_i915_private *dev_priv = data;
2784 	struct drm_modeset_acquire_ctx ctx;
2785 	int ret;
2786 
2787 	if (!CAN_PSR(dev_priv))
2788 		return -ENODEV;
2789 
2790 	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2791 
2792 	intel_runtime_pm_get(dev_priv);
2793 
2794 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2795 
2796 retry:
2797 	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
2798 	if (ret == -EDEADLK) {
2799 		ret = drm_modeset_backoff(&ctx);
2800 		if (!ret)
2801 			goto retry;
2802 	}
2803 
2804 	drm_modeset_drop_locks(&ctx);
2805 	drm_modeset_acquire_fini(&ctx);
2806 
2807 	intel_runtime_pm_put(dev_priv);
2808 
2809 	return ret;
2810 }
2811 
2812 static int
2813 i915_edp_psr_debug_get(void *data, u64 *val)
2814 {
2815 	struct drm_i915_private *dev_priv = data;
2816 
2817 	if (!CAN_PSR(dev_priv))
2818 		return -ENODEV;
2819 
2820 	*val = READ_ONCE(dev_priv->psr.debug);
2821 	return 0;
2822 }
2823 
2824 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2825 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2826 			"%llu\n");
2827 
2828 static int i915_energy_uJ(struct seq_file *m, void *data)
2829 {
2830 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2831 	unsigned long long power;
2832 	u32 units;
2833 
2834 	if (INTEL_GEN(dev_priv) < 6)
2835 		return -ENODEV;
2836 
2837 	intel_runtime_pm_get(dev_priv);
2838 
2839 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
2840 		intel_runtime_pm_put(dev_priv);
2841 		return -ENODEV;
2842 	}
2843 
2844 	units = (power & 0x1f00) >> 8;
2845 	power = I915_READ(MCH_SECP_NRG_STTS);
2846 	power = (1000000 * power) >> units; /* convert to uJ */
2847 
2848 	intel_runtime_pm_put(dev_priv);
2849 
2850 	seq_printf(m, "%llu", power);
2851 
2852 	return 0;
2853 }
2854 
2855 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2856 {
2857 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2858 	struct pci_dev *pdev = dev_priv->drm.pdev;
2859 
2860 	if (!HAS_RUNTIME_PM(dev_priv))
2861 		seq_puts(m, "Runtime power management not supported\n");
2862 
2863 	seq_printf(m, "GPU idle: %s (epoch %u)\n",
2864 		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2865 	seq_printf(m, "IRQs disabled: %s\n",
2866 		   yesno(!intel_irqs_enabled(dev_priv)));
2867 #ifdef CONFIG_PM
2868 	seq_printf(m, "Usage count: %d\n",
2869 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2870 #else
2871 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2872 #endif
2873 	seq_printf(m, "PCI device power state: %s [%d]\n",
2874 		   pci_power_name(pdev->current_state),
2875 		   pdev->current_state);
2876 
2877 	return 0;
2878 }
2879 
2880 static int i915_power_domain_info(struct seq_file *m, void *unused)
2881 {
2882 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2883 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2884 	int i;
2885 
2886 	mutex_lock(&power_domains->lock);
2887 
2888 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2889 	for (i = 0; i < power_domains->power_well_count; i++) {
2890 		struct i915_power_well *power_well;
2891 		enum intel_display_power_domain power_domain;
2892 
2893 		power_well = &power_domains->power_wells[i];
2894 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
2895 			   power_well->count);
2896 
2897 		for_each_power_domain(power_domain, power_well->desc->domains)
2898 			seq_printf(m, "  %-23s %d\n",
2899 				 intel_display_power_domain_str(power_domain),
2900 				 power_domains->domain_use_count[power_domain]);
2901 	}
2902 
2903 	mutex_unlock(&power_domains->lock);
2904 
2905 	return 0;
2906 }
2907 
2908 static int i915_dmc_info(struct seq_file *m, void *unused)
2909 {
2910 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2911 	struct intel_csr *csr;
2912 
2913 	if (!HAS_CSR(dev_priv))
2914 		return -ENODEV;
2915 
2916 	csr = &dev_priv->csr;
2917 
2918 	intel_runtime_pm_get(dev_priv);
2919 
2920 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2921 	seq_printf(m, "path: %s\n", csr->fw_path);
2922 
2923 	if (!csr->dmc_payload)
2924 		goto out;
2925 
2926 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2927 		   CSR_VERSION_MINOR(csr->version));
2928 
2929 	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
2930 		goto out;
2931 
2932 	seq_printf(m, "DC3 -> DC5 count: %d\n",
2933 		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2934 						    SKL_CSR_DC3_DC5_COUNT));
2935 	if (!IS_GEN9_LP(dev_priv))
2936 		seq_printf(m, "DC5 -> DC6 count: %d\n",
2937 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2938 
2939 out:
2940 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2941 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2942 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2943 
2944 	intel_runtime_pm_put(dev_priv);
2945 
2946 	return 0;
2947 }
2948 
2949 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2950 				 struct drm_display_mode *mode)
2951 {
2952 	int i;
2953 
2954 	for (i = 0; i < tabs; i++)
2955 		seq_putc(m, '\t');
2956 
2957 	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2958 		   mode->base.id, mode->name,
2959 		   mode->vrefresh, mode->clock,
2960 		   mode->hdisplay, mode->hsync_start,
2961 		   mode->hsync_end, mode->htotal,
2962 		   mode->vdisplay, mode->vsync_start,
2963 		   mode->vsync_end, mode->vtotal,
2964 		   mode->type, mode->flags);
2965 }
2966 
2967 static void intel_encoder_info(struct seq_file *m,
2968 			       struct intel_crtc *intel_crtc,
2969 			       struct intel_encoder *intel_encoder)
2970 {
2971 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2972 	struct drm_device *dev = &dev_priv->drm;
2973 	struct drm_crtc *crtc = &intel_crtc->base;
2974 	struct intel_connector *intel_connector;
2975 	struct drm_encoder *encoder;
2976 
2977 	encoder = &intel_encoder->base;
2978 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2979 		   encoder->base.id, encoder->name);
2980 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2981 		struct drm_connector *connector = &intel_connector->base;
2982 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2983 			   connector->base.id,
2984 			   connector->name,
2985 			   drm_get_connector_status_name(connector->status));
2986 		if (connector->status == connector_status_connected) {
2987 			struct drm_display_mode *mode = &crtc->mode;
2988 			seq_printf(m, ", mode:\n");
2989 			intel_seq_print_mode(m, 2, mode);
2990 		} else {
2991 			seq_putc(m, '\n');
2992 		}
2993 	}
2994 }
2995 
2996 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2997 {
2998 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2999 	struct drm_device *dev = &dev_priv->drm;
3000 	struct drm_crtc *crtc = &intel_crtc->base;
3001 	struct intel_encoder *intel_encoder;
3002 	struct drm_plane_state *plane_state = crtc->primary->state;
3003 	struct drm_framebuffer *fb = plane_state->fb;
3004 
3005 	if (fb)
3006 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
3007 			   fb->base.id, plane_state->src_x >> 16,
3008 			   plane_state->src_y >> 16, fb->width, fb->height);
3009 	else
3010 		seq_puts(m, "\tprimary plane disabled\n");
3011 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3012 		intel_encoder_info(m, intel_crtc, intel_encoder);
3013 }
3014 
3015 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3016 {
3017 	struct drm_display_mode *mode = panel->fixed_mode;
3018 
3019 	seq_printf(m, "\tfixed mode:\n");
3020 	intel_seq_print_mode(m, 2, mode);
3021 }
3022 
3023 static void intel_dp_info(struct seq_file *m,
3024 			  struct intel_connector *intel_connector)
3025 {
3026 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3027 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3028 
3029 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
3030 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
3031 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
3032 		intel_panel_info(m, &intel_connector->panel);
3033 
3034 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
3035 				&intel_dp->aux);
3036 }
3037 
3038 static void intel_dp_mst_info(struct seq_file *m,
3039 			  struct intel_connector *intel_connector)
3040 {
3041 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3042 	struct intel_dp_mst_encoder *intel_mst =
3043 		enc_to_mst(&intel_encoder->base);
3044 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
3045 	struct intel_dp *intel_dp = &intel_dig_port->dp;
3046 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3047 					intel_connector->port);
3048 
3049 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3050 }
3051 
3052 static void intel_hdmi_info(struct seq_file *m,
3053 			    struct intel_connector *intel_connector)
3054 {
3055 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3056 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3057 
3058 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
3059 }
3060 
3061 static void intel_lvds_info(struct seq_file *m,
3062 			    struct intel_connector *intel_connector)
3063 {
3064 	intel_panel_info(m, &intel_connector->panel);
3065 }
3066 
3067 static void intel_connector_info(struct seq_file *m,
3068 				 struct drm_connector *connector)
3069 {
3070 	struct intel_connector *intel_connector = to_intel_connector(connector);
3071 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3072 	struct drm_display_mode *mode;
3073 
3074 	seq_printf(m, "connector %d: type %s, status: %s\n",
3075 		   connector->base.id, connector->name,
3076 		   drm_get_connector_status_name(connector->status));
3077 
3078 	if (connector->status == connector_status_disconnected)
3079 		return;
3080 
3081 	seq_printf(m, "\tname: %s\n", connector->display_info.name);
3082 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3083 		   connector->display_info.width_mm,
3084 		   connector->display_info.height_mm);
3085 	seq_printf(m, "\tsubpixel order: %s\n",
3086 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3087 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
3088 
3089 	if (!intel_encoder)
3090 		return;
3091 
3092 	switch (connector->connector_type) {
3093 	case DRM_MODE_CONNECTOR_DisplayPort:
3094 	case DRM_MODE_CONNECTOR_eDP:
3095 		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3096 			intel_dp_mst_info(m, intel_connector);
3097 		else
3098 			intel_dp_info(m, intel_connector);
3099 		break;
3100 	case DRM_MODE_CONNECTOR_LVDS:
3101 		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3102 			intel_lvds_info(m, intel_connector);
3103 		break;
3104 	case DRM_MODE_CONNECTOR_HDMIA:
3105 		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3106 		    intel_encoder->type == INTEL_OUTPUT_DDI)
3107 			intel_hdmi_info(m, intel_connector);
3108 		break;
3109 	default:
3110 		break;
3111 	}
3112 
3113 	seq_printf(m, "\tmodes:\n");
3114 	list_for_each_entry(mode, &connector->modes, head)
3115 		intel_seq_print_mode(m, 2, mode);
3116 }
3117 
3118 static const char *plane_type(enum drm_plane_type type)
3119 {
3120 	switch (type) {
3121 	case DRM_PLANE_TYPE_OVERLAY:
3122 		return "OVL";
3123 	case DRM_PLANE_TYPE_PRIMARY:
3124 		return "PRI";
3125 	case DRM_PLANE_TYPE_CURSOR:
3126 		return "CUR";
3127 	/*
3128 	 * Deliberately omitting default: to generate compiler warnings
3129 	 * when a new drm_plane_type gets added.
3130 	 */
3131 	}
3132 
3133 	return "unknown";
3134 }
3135 
3136 static const char *plane_rotation(unsigned int rotation)
3137 {
3138 	static char buf[48];
3139 	/*
3140 	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
3141 	 * will print them all to visualize if the values are misused
3142 	 */
3143 	snprintf(buf, sizeof(buf),
3144 		 "%s%s%s%s%s%s(0x%08x)",
3145 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
3146 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3147 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3148 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3149 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3150 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3151 		 rotation);
3152 
3153 	return buf;
3154 }
3155 
3156 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3157 {
3158 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3159 	struct drm_device *dev = &dev_priv->drm;
3160 	struct intel_plane *intel_plane;
3161 
3162 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3163 		struct drm_plane_state *state;
3164 		struct drm_plane *plane = &intel_plane->base;
3165 		struct drm_format_name_buf format_name;
3166 
3167 		if (!plane->state) {
3168 			seq_puts(m, "plane->state is NULL!\n");
3169 			continue;
3170 		}
3171 
3172 		state = plane->state;
3173 
3174 		if (state->fb) {
3175 			drm_get_format_name(state->fb->format->format,
3176 					    &format_name);
3177 		} else {
3178 			sprintf(format_name.str, "N/A");
3179 		}
3180 
3181 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3182 			   plane->base.id,
3183 			   plane_type(intel_plane->base.type),
3184 			   state->crtc_x, state->crtc_y,
3185 			   state->crtc_w, state->crtc_h,
3186 			   (state->src_x >> 16),
3187 			   ((state->src_x & 0xffff) * 15625) >> 10,
3188 			   (state->src_y >> 16),
3189 			   ((state->src_y & 0xffff) * 15625) >> 10,
3190 			   (state->src_w >> 16),
3191 			   ((state->src_w & 0xffff) * 15625) >> 10,
3192 			   (state->src_h >> 16),
3193 			   ((state->src_h & 0xffff) * 15625) >> 10,
3194 			   format_name.str,
3195 			   plane_rotation(state->rotation));
3196 	}
3197 }
3198 
3199 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3200 {
3201 	struct intel_crtc_state *pipe_config;
3202 	int num_scalers = intel_crtc->num_scalers;
3203 	int i;
3204 
3205 	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3206 
3207 	/* Not all platformas have a scaler */
3208 	if (num_scalers) {
3209 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3210 			   num_scalers,
3211 			   pipe_config->scaler_state.scaler_users,
3212 			   pipe_config->scaler_state.scaler_id);
3213 
3214 		for (i = 0; i < num_scalers; i++) {
3215 			struct intel_scaler *sc =
3216 					&pipe_config->scaler_state.scalers[i];
3217 
3218 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3219 				   i, yesno(sc->in_use), sc->mode);
3220 		}
3221 		seq_puts(m, "\n");
3222 	} else {
3223 		seq_puts(m, "\tNo scalers available on this platform\n");
3224 	}
3225 }
3226 
3227 static int i915_display_info(struct seq_file *m, void *unused)
3228 {
3229 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3230 	struct drm_device *dev = &dev_priv->drm;
3231 	struct intel_crtc *crtc;
3232 	struct drm_connector *connector;
3233 	struct drm_connector_list_iter conn_iter;
3234 
3235 	intel_runtime_pm_get(dev_priv);
3236 	seq_printf(m, "CRTC info\n");
3237 	seq_printf(m, "---------\n");
3238 	for_each_intel_crtc(dev, crtc) {
3239 		struct intel_crtc_state *pipe_config;
3240 
3241 		drm_modeset_lock(&crtc->base.mutex, NULL);
3242 		pipe_config = to_intel_crtc_state(crtc->base.state);
3243 
3244 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3245 			   crtc->base.base.id, pipe_name(crtc->pipe),
3246 			   yesno(pipe_config->base.active),
3247 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3248 			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3249 
3250 		if (pipe_config->base.active) {
3251 			struct intel_plane *cursor =
3252 				to_intel_plane(crtc->base.cursor);
3253 
3254 			intel_crtc_info(m, crtc);
3255 
3256 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3257 				   yesno(cursor->base.state->visible),
3258 				   cursor->base.state->crtc_x,
3259 				   cursor->base.state->crtc_y,
3260 				   cursor->base.state->crtc_w,
3261 				   cursor->base.state->crtc_h,
3262 				   cursor->cursor.base);
3263 			intel_scaler_info(m, crtc);
3264 			intel_plane_info(m, crtc);
3265 		}
3266 
3267 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3268 			   yesno(!crtc->cpu_fifo_underrun_disabled),
3269 			   yesno(!crtc->pch_fifo_underrun_disabled));
3270 		drm_modeset_unlock(&crtc->base.mutex);
3271 	}
3272 
3273 	seq_printf(m, "\n");
3274 	seq_printf(m, "Connector info\n");
3275 	seq_printf(m, "--------------\n");
3276 	mutex_lock(&dev->mode_config.mutex);
3277 	drm_connector_list_iter_begin(dev, &conn_iter);
3278 	drm_for_each_connector_iter(connector, &conn_iter)
3279 		intel_connector_info(m, connector);
3280 	drm_connector_list_iter_end(&conn_iter);
3281 	mutex_unlock(&dev->mode_config.mutex);
3282 
3283 	intel_runtime_pm_put(dev_priv);
3284 
3285 	return 0;
3286 }
3287 
3288 static int i915_engine_info(struct seq_file *m, void *unused)
3289 {
3290 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3291 	struct intel_engine_cs *engine;
3292 	enum intel_engine_id id;
3293 	struct drm_printer p;
3294 
3295 	intel_runtime_pm_get(dev_priv);
3296 
3297 	seq_printf(m, "GT awake? %s (epoch %u)\n",
3298 		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3299 	seq_printf(m, "Global active requests: %d\n",
3300 		   dev_priv->gt.active_requests);
3301 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
3302 		   dev_priv->info.cs_timestamp_frequency_khz);
3303 
3304 	p = drm_seq_file_printer(m);
3305 	for_each_engine(engine, dev_priv, id)
3306 		intel_engine_dump(engine, &p, "%s\n", engine->name);
3307 
3308 	intel_runtime_pm_put(dev_priv);
3309 
3310 	return 0;
3311 }
3312 
3313 static int i915_rcs_topology(struct seq_file *m, void *unused)
3314 {
3315 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3316 	struct drm_printer p = drm_seq_file_printer(m);
3317 
3318 	intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3319 
3320 	return 0;
3321 }
3322 
3323 static int i915_shrinker_info(struct seq_file *m, void *unused)
3324 {
3325 	struct drm_i915_private *i915 = node_to_i915(m->private);
3326 
3327 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3328 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3329 
3330 	return 0;
3331 }
3332 
3333 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3334 {
3335 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3336 	struct drm_device *dev = &dev_priv->drm;
3337 	int i;
3338 
3339 	drm_modeset_lock_all(dev);
3340 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3341 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3342 
3343 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3344 			   pll->info->id);
3345 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3346 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3347 		seq_printf(m, " tracked hardware state:\n");
3348 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3349 		seq_printf(m, " dpll_md: 0x%08x\n",
3350 			   pll->state.hw_state.dpll_md);
3351 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3352 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3353 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3354 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3355 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3356 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3357 			   pll->state.hw_state.mg_refclkin_ctl);
3358 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3359 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
3360 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3361 			   pll->state.hw_state.mg_clktop2_hsclkctl);
3362 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
3363 			   pll->state.hw_state.mg_pll_div0);
3364 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
3365 			   pll->state.hw_state.mg_pll_div1);
3366 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
3367 			   pll->state.hw_state.mg_pll_lf);
3368 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3369 			   pll->state.hw_state.mg_pll_frac_lock);
3370 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3371 			   pll->state.hw_state.mg_pll_ssc);
3372 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
3373 			   pll->state.hw_state.mg_pll_bias);
3374 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3375 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
3376 	}
3377 	drm_modeset_unlock_all(dev);
3378 
3379 	return 0;
3380 }
3381 
3382 static int i915_wa_registers(struct seq_file *m, void *unused)
3383 {
3384 	struct drm_i915_private *i915 = node_to_i915(m->private);
3385 	const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
3386 	struct i915_wa *wa;
3387 	unsigned int i;
3388 
3389 	seq_printf(m, "Workarounds applied: %u\n", wal->count);
3390 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
3391 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3392 			   i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
3393 
3394 	return 0;
3395 }
3396 
3397 static int i915_ipc_status_show(struct seq_file *m, void *data)
3398 {
3399 	struct drm_i915_private *dev_priv = m->private;
3400 
3401 	seq_printf(m, "Isochronous Priority Control: %s\n",
3402 			yesno(dev_priv->ipc_enabled));
3403 	return 0;
3404 }
3405 
3406 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3407 {
3408 	struct drm_i915_private *dev_priv = inode->i_private;
3409 
3410 	if (!HAS_IPC(dev_priv))
3411 		return -ENODEV;
3412 
3413 	return single_open(file, i915_ipc_status_show, dev_priv);
3414 }
3415 
3416 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3417 				     size_t len, loff_t *offp)
3418 {
3419 	struct seq_file *m = file->private_data;
3420 	struct drm_i915_private *dev_priv = m->private;
3421 	int ret;
3422 	bool enable;
3423 
3424 	ret = kstrtobool_from_user(ubuf, len, &enable);
3425 	if (ret < 0)
3426 		return ret;
3427 
3428 	intel_runtime_pm_get(dev_priv);
3429 	if (!dev_priv->ipc_enabled && enable)
3430 		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3431 	dev_priv->wm.distrust_bios_wm = true;
3432 	dev_priv->ipc_enabled = enable;
3433 	intel_enable_ipc(dev_priv);
3434 	intel_runtime_pm_put(dev_priv);
3435 
3436 	return len;
3437 }
3438 
3439 static const struct file_operations i915_ipc_status_fops = {
3440 	.owner = THIS_MODULE,
3441 	.open = i915_ipc_status_open,
3442 	.read = seq_read,
3443 	.llseek = seq_lseek,
3444 	.release = single_release,
3445 	.write = i915_ipc_status_write
3446 };
3447 
3448 static int i915_ddb_info(struct seq_file *m, void *unused)
3449 {
3450 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3451 	struct drm_device *dev = &dev_priv->drm;
3452 	struct skl_ddb_entry *entry;
3453 	struct intel_crtc *crtc;
3454 
3455 	if (INTEL_GEN(dev_priv) < 9)
3456 		return -ENODEV;
3457 
3458 	drm_modeset_lock_all(dev);
3459 
3460 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3461 
3462 	for_each_intel_crtc(&dev_priv->drm, crtc) {
3463 		struct intel_crtc_state *crtc_state =
3464 			to_intel_crtc_state(crtc->base.state);
3465 		enum pipe pipe = crtc->pipe;
3466 		enum plane_id plane_id;
3467 
3468 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3469 
3470 		for_each_plane_id_on_crtc(crtc, plane_id) {
3471 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3472 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
3473 				   entry->start, entry->end,
3474 				   skl_ddb_entry_size(entry));
3475 		}
3476 
3477 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
3478 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3479 			   entry->end, skl_ddb_entry_size(entry));
3480 	}
3481 
3482 	drm_modeset_unlock_all(dev);
3483 
3484 	return 0;
3485 }
3486 
3487 static void drrs_status_per_crtc(struct seq_file *m,
3488 				 struct drm_device *dev,
3489 				 struct intel_crtc *intel_crtc)
3490 {
3491 	struct drm_i915_private *dev_priv = to_i915(dev);
3492 	struct i915_drrs *drrs = &dev_priv->drrs;
3493 	int vrefresh = 0;
3494 	struct drm_connector *connector;
3495 	struct drm_connector_list_iter conn_iter;
3496 
3497 	drm_connector_list_iter_begin(dev, &conn_iter);
3498 	drm_for_each_connector_iter(connector, &conn_iter) {
3499 		if (connector->state->crtc != &intel_crtc->base)
3500 			continue;
3501 
3502 		seq_printf(m, "%s:\n", connector->name);
3503 	}
3504 	drm_connector_list_iter_end(&conn_iter);
3505 
3506 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3507 		seq_puts(m, "\tVBT: DRRS_type: Static");
3508 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3509 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3510 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3511 		seq_puts(m, "\tVBT: DRRS_type: None");
3512 	else
3513 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3514 
3515 	seq_puts(m, "\n\n");
3516 
3517 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3518 		struct intel_panel *panel;
3519 
3520 		mutex_lock(&drrs->mutex);
3521 		/* DRRS Supported */
3522 		seq_puts(m, "\tDRRS Supported: Yes\n");
3523 
3524 		/* disable_drrs() will make drrs->dp NULL */
3525 		if (!drrs->dp) {
3526 			seq_puts(m, "Idleness DRRS: Disabled\n");
3527 			if (dev_priv->psr.enabled)
3528 				seq_puts(m,
3529 				"\tAs PSR is enabled, DRRS is not enabled\n");
3530 			mutex_unlock(&drrs->mutex);
3531 			return;
3532 		}
3533 
3534 		panel = &drrs->dp->attached_connector->panel;
3535 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3536 					drrs->busy_frontbuffer_bits);
3537 
3538 		seq_puts(m, "\n\t\t");
3539 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3540 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3541 			vrefresh = panel->fixed_mode->vrefresh;
3542 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3543 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3544 			vrefresh = panel->downclock_mode->vrefresh;
3545 		} else {
3546 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3547 						drrs->refresh_rate_type);
3548 			mutex_unlock(&drrs->mutex);
3549 			return;
3550 		}
3551 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3552 
3553 		seq_puts(m, "\n\t\t");
3554 		mutex_unlock(&drrs->mutex);
3555 	} else {
3556 		/* DRRS not supported. Print the VBT parameter*/
3557 		seq_puts(m, "\tDRRS Supported : No");
3558 	}
3559 	seq_puts(m, "\n");
3560 }
3561 
3562 static int i915_drrs_status(struct seq_file *m, void *unused)
3563 {
3564 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3565 	struct drm_device *dev = &dev_priv->drm;
3566 	struct intel_crtc *intel_crtc;
3567 	int active_crtc_cnt = 0;
3568 
3569 	drm_modeset_lock_all(dev);
3570 	for_each_intel_crtc(dev, intel_crtc) {
3571 		if (intel_crtc->base.state->active) {
3572 			active_crtc_cnt++;
3573 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3574 
3575 			drrs_status_per_crtc(m, dev, intel_crtc);
3576 		}
3577 	}
3578 	drm_modeset_unlock_all(dev);
3579 
3580 	if (!active_crtc_cnt)
3581 		seq_puts(m, "No active crtc found\n");
3582 
3583 	return 0;
3584 }
3585 
3586 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3587 {
3588 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3589 	struct drm_device *dev = &dev_priv->drm;
3590 	struct intel_encoder *intel_encoder;
3591 	struct intel_digital_port *intel_dig_port;
3592 	struct drm_connector *connector;
3593 	struct drm_connector_list_iter conn_iter;
3594 
3595 	drm_connector_list_iter_begin(dev, &conn_iter);
3596 	drm_for_each_connector_iter(connector, &conn_iter) {
3597 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3598 			continue;
3599 
3600 		intel_encoder = intel_attached_encoder(connector);
3601 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3602 			continue;
3603 
3604 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3605 		if (!intel_dig_port->dp.can_mst)
3606 			continue;
3607 
3608 		seq_printf(m, "MST Source Port %c\n",
3609 			   port_name(intel_dig_port->base.port));
3610 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3611 	}
3612 	drm_connector_list_iter_end(&conn_iter);
3613 
3614 	return 0;
3615 }
3616 
3617 static ssize_t i915_displayport_test_active_write(struct file *file,
3618 						  const char __user *ubuf,
3619 						  size_t len, loff_t *offp)
3620 {
3621 	char *input_buffer;
3622 	int status = 0;
3623 	struct drm_device *dev;
3624 	struct drm_connector *connector;
3625 	struct drm_connector_list_iter conn_iter;
3626 	struct intel_dp *intel_dp;
3627 	int val = 0;
3628 
3629 	dev = ((struct seq_file *)file->private_data)->private;
3630 
3631 	if (len == 0)
3632 		return 0;
3633 
3634 	input_buffer = memdup_user_nul(ubuf, len);
3635 	if (IS_ERR(input_buffer))
3636 		return PTR_ERR(input_buffer);
3637 
3638 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3639 
3640 	drm_connector_list_iter_begin(dev, &conn_iter);
3641 	drm_for_each_connector_iter(connector, &conn_iter) {
3642 		struct intel_encoder *encoder;
3643 
3644 		if (connector->connector_type !=
3645 		    DRM_MODE_CONNECTOR_DisplayPort)
3646 			continue;
3647 
3648 		encoder = to_intel_encoder(connector->encoder);
3649 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3650 			continue;
3651 
3652 		if (encoder && connector->status == connector_status_connected) {
3653 			intel_dp = enc_to_intel_dp(&encoder->base);
3654 			status = kstrtoint(input_buffer, 10, &val);
3655 			if (status < 0)
3656 				break;
3657 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3658 			/* To prevent erroneous activation of the compliance
3659 			 * testing code, only accept an actual value of 1 here
3660 			 */
3661 			if (val == 1)
3662 				intel_dp->compliance.test_active = 1;
3663 			else
3664 				intel_dp->compliance.test_active = 0;
3665 		}
3666 	}
3667 	drm_connector_list_iter_end(&conn_iter);
3668 	kfree(input_buffer);
3669 	if (status < 0)
3670 		return status;
3671 
3672 	*offp += len;
3673 	return len;
3674 }
3675 
3676 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3677 {
3678 	struct drm_i915_private *dev_priv = m->private;
3679 	struct drm_device *dev = &dev_priv->drm;
3680 	struct drm_connector *connector;
3681 	struct drm_connector_list_iter conn_iter;
3682 	struct intel_dp *intel_dp;
3683 
3684 	drm_connector_list_iter_begin(dev, &conn_iter);
3685 	drm_for_each_connector_iter(connector, &conn_iter) {
3686 		struct intel_encoder *encoder;
3687 
3688 		if (connector->connector_type !=
3689 		    DRM_MODE_CONNECTOR_DisplayPort)
3690 			continue;
3691 
3692 		encoder = to_intel_encoder(connector->encoder);
3693 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3694 			continue;
3695 
3696 		if (encoder && connector->status == connector_status_connected) {
3697 			intel_dp = enc_to_intel_dp(&encoder->base);
3698 			if (intel_dp->compliance.test_active)
3699 				seq_puts(m, "1");
3700 			else
3701 				seq_puts(m, "0");
3702 		} else
3703 			seq_puts(m, "0");
3704 	}
3705 	drm_connector_list_iter_end(&conn_iter);
3706 
3707 	return 0;
3708 }
3709 
3710 static int i915_displayport_test_active_open(struct inode *inode,
3711 					     struct file *file)
3712 {
3713 	return single_open(file, i915_displayport_test_active_show,
3714 			   inode->i_private);
3715 }
3716 
3717 static const struct file_operations i915_displayport_test_active_fops = {
3718 	.owner = THIS_MODULE,
3719 	.open = i915_displayport_test_active_open,
3720 	.read = seq_read,
3721 	.llseek = seq_lseek,
3722 	.release = single_release,
3723 	.write = i915_displayport_test_active_write
3724 };
3725 
3726 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3727 {
3728 	struct drm_i915_private *dev_priv = m->private;
3729 	struct drm_device *dev = &dev_priv->drm;
3730 	struct drm_connector *connector;
3731 	struct drm_connector_list_iter conn_iter;
3732 	struct intel_dp *intel_dp;
3733 
3734 	drm_connector_list_iter_begin(dev, &conn_iter);
3735 	drm_for_each_connector_iter(connector, &conn_iter) {
3736 		struct intel_encoder *encoder;
3737 
3738 		if (connector->connector_type !=
3739 		    DRM_MODE_CONNECTOR_DisplayPort)
3740 			continue;
3741 
3742 		encoder = to_intel_encoder(connector->encoder);
3743 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3744 			continue;
3745 
3746 		if (encoder && connector->status == connector_status_connected) {
3747 			intel_dp = enc_to_intel_dp(&encoder->base);
3748 			if (intel_dp->compliance.test_type ==
3749 			    DP_TEST_LINK_EDID_READ)
3750 				seq_printf(m, "%lx",
3751 					   intel_dp->compliance.test_data.edid);
3752 			else if (intel_dp->compliance.test_type ==
3753 				 DP_TEST_LINK_VIDEO_PATTERN) {
3754 				seq_printf(m, "hdisplay: %d\n",
3755 					   intel_dp->compliance.test_data.hdisplay);
3756 				seq_printf(m, "vdisplay: %d\n",
3757 					   intel_dp->compliance.test_data.vdisplay);
3758 				seq_printf(m, "bpc: %u\n",
3759 					   intel_dp->compliance.test_data.bpc);
3760 			}
3761 		} else
3762 			seq_puts(m, "0");
3763 	}
3764 	drm_connector_list_iter_end(&conn_iter);
3765 
3766 	return 0;
3767 }
3768 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3769 
3770 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3771 {
3772 	struct drm_i915_private *dev_priv = m->private;
3773 	struct drm_device *dev = &dev_priv->drm;
3774 	struct drm_connector *connector;
3775 	struct drm_connector_list_iter conn_iter;
3776 	struct intel_dp *intel_dp;
3777 
3778 	drm_connector_list_iter_begin(dev, &conn_iter);
3779 	drm_for_each_connector_iter(connector, &conn_iter) {
3780 		struct intel_encoder *encoder;
3781 
3782 		if (connector->connector_type !=
3783 		    DRM_MODE_CONNECTOR_DisplayPort)
3784 			continue;
3785 
3786 		encoder = to_intel_encoder(connector->encoder);
3787 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3788 			continue;
3789 
3790 		if (encoder && connector->status == connector_status_connected) {
3791 			intel_dp = enc_to_intel_dp(&encoder->base);
3792 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3793 		} else
3794 			seq_puts(m, "0");
3795 	}
3796 	drm_connector_list_iter_end(&conn_iter);
3797 
3798 	return 0;
3799 }
3800 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3801 
3802 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3803 {
3804 	struct drm_i915_private *dev_priv = m->private;
3805 	struct drm_device *dev = &dev_priv->drm;
3806 	int level;
3807 	int num_levels;
3808 
3809 	if (IS_CHERRYVIEW(dev_priv))
3810 		num_levels = 3;
3811 	else if (IS_VALLEYVIEW(dev_priv))
3812 		num_levels = 1;
3813 	else if (IS_G4X(dev_priv))
3814 		num_levels = 3;
3815 	else
3816 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3817 
3818 	drm_modeset_lock_all(dev);
3819 
3820 	for (level = 0; level < num_levels; level++) {
3821 		unsigned int latency = wm[level];
3822 
3823 		/*
3824 		 * - WM1+ latency values in 0.5us units
3825 		 * - latencies are in us on gen9/vlv/chv
3826 		 */
3827 		if (INTEL_GEN(dev_priv) >= 9 ||
3828 		    IS_VALLEYVIEW(dev_priv) ||
3829 		    IS_CHERRYVIEW(dev_priv) ||
3830 		    IS_G4X(dev_priv))
3831 			latency *= 10;
3832 		else if (level > 0)
3833 			latency *= 5;
3834 
3835 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3836 			   level, wm[level], latency / 10, latency % 10);
3837 	}
3838 
3839 	drm_modeset_unlock_all(dev);
3840 }
3841 
3842 static int pri_wm_latency_show(struct seq_file *m, void *data)
3843 {
3844 	struct drm_i915_private *dev_priv = m->private;
3845 	const uint16_t *latencies;
3846 
3847 	if (INTEL_GEN(dev_priv) >= 9)
3848 		latencies = dev_priv->wm.skl_latency;
3849 	else
3850 		latencies = dev_priv->wm.pri_latency;
3851 
3852 	wm_latency_show(m, latencies);
3853 
3854 	return 0;
3855 }
3856 
3857 static int spr_wm_latency_show(struct seq_file *m, void *data)
3858 {
3859 	struct drm_i915_private *dev_priv = m->private;
3860 	const uint16_t *latencies;
3861 
3862 	if (INTEL_GEN(dev_priv) >= 9)
3863 		latencies = dev_priv->wm.skl_latency;
3864 	else
3865 		latencies = dev_priv->wm.spr_latency;
3866 
3867 	wm_latency_show(m, latencies);
3868 
3869 	return 0;
3870 }
3871 
3872 static int cur_wm_latency_show(struct seq_file *m, void *data)
3873 {
3874 	struct drm_i915_private *dev_priv = m->private;
3875 	const uint16_t *latencies;
3876 
3877 	if (INTEL_GEN(dev_priv) >= 9)
3878 		latencies = dev_priv->wm.skl_latency;
3879 	else
3880 		latencies = dev_priv->wm.cur_latency;
3881 
3882 	wm_latency_show(m, latencies);
3883 
3884 	return 0;
3885 }
3886 
3887 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3888 {
3889 	struct drm_i915_private *dev_priv = inode->i_private;
3890 
3891 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3892 		return -ENODEV;
3893 
3894 	return single_open(file, pri_wm_latency_show, dev_priv);
3895 }
3896 
3897 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3898 {
3899 	struct drm_i915_private *dev_priv = inode->i_private;
3900 
3901 	if (HAS_GMCH_DISPLAY(dev_priv))
3902 		return -ENODEV;
3903 
3904 	return single_open(file, spr_wm_latency_show, dev_priv);
3905 }
3906 
3907 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3908 {
3909 	struct drm_i915_private *dev_priv = inode->i_private;
3910 
3911 	if (HAS_GMCH_DISPLAY(dev_priv))
3912 		return -ENODEV;
3913 
3914 	return single_open(file, cur_wm_latency_show, dev_priv);
3915 }
3916 
3917 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3918 				size_t len, loff_t *offp, uint16_t wm[8])
3919 {
3920 	struct seq_file *m = file->private_data;
3921 	struct drm_i915_private *dev_priv = m->private;
3922 	struct drm_device *dev = &dev_priv->drm;
3923 	uint16_t new[8] = { 0 };
3924 	int num_levels;
3925 	int level;
3926 	int ret;
3927 	char tmp[32];
3928 
3929 	if (IS_CHERRYVIEW(dev_priv))
3930 		num_levels = 3;
3931 	else if (IS_VALLEYVIEW(dev_priv))
3932 		num_levels = 1;
3933 	else if (IS_G4X(dev_priv))
3934 		num_levels = 3;
3935 	else
3936 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3937 
3938 	if (len >= sizeof(tmp))
3939 		return -EINVAL;
3940 
3941 	if (copy_from_user(tmp, ubuf, len))
3942 		return -EFAULT;
3943 
3944 	tmp[len] = '\0';
3945 
3946 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3947 		     &new[0], &new[1], &new[2], &new[3],
3948 		     &new[4], &new[5], &new[6], &new[7]);
3949 	if (ret != num_levels)
3950 		return -EINVAL;
3951 
3952 	drm_modeset_lock_all(dev);
3953 
3954 	for (level = 0; level < num_levels; level++)
3955 		wm[level] = new[level];
3956 
3957 	drm_modeset_unlock_all(dev);
3958 
3959 	return len;
3960 }
3961 
3962 
3963 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3964 				    size_t len, loff_t *offp)
3965 {
3966 	struct seq_file *m = file->private_data;
3967 	struct drm_i915_private *dev_priv = m->private;
3968 	uint16_t *latencies;
3969 
3970 	if (INTEL_GEN(dev_priv) >= 9)
3971 		latencies = dev_priv->wm.skl_latency;
3972 	else
3973 		latencies = dev_priv->wm.pri_latency;
3974 
3975 	return wm_latency_write(file, ubuf, len, offp, latencies);
3976 }
3977 
3978 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3979 				    size_t len, loff_t *offp)
3980 {
3981 	struct seq_file *m = file->private_data;
3982 	struct drm_i915_private *dev_priv = m->private;
3983 	uint16_t *latencies;
3984 
3985 	if (INTEL_GEN(dev_priv) >= 9)
3986 		latencies = dev_priv->wm.skl_latency;
3987 	else
3988 		latencies = dev_priv->wm.spr_latency;
3989 
3990 	return wm_latency_write(file, ubuf, len, offp, latencies);
3991 }
3992 
3993 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3994 				    size_t len, loff_t *offp)
3995 {
3996 	struct seq_file *m = file->private_data;
3997 	struct drm_i915_private *dev_priv = m->private;
3998 	uint16_t *latencies;
3999 
4000 	if (INTEL_GEN(dev_priv) >= 9)
4001 		latencies = dev_priv->wm.skl_latency;
4002 	else
4003 		latencies = dev_priv->wm.cur_latency;
4004 
4005 	return wm_latency_write(file, ubuf, len, offp, latencies);
4006 }
4007 
4008 static const struct file_operations i915_pri_wm_latency_fops = {
4009 	.owner = THIS_MODULE,
4010 	.open = pri_wm_latency_open,
4011 	.read = seq_read,
4012 	.llseek = seq_lseek,
4013 	.release = single_release,
4014 	.write = pri_wm_latency_write
4015 };
4016 
4017 static const struct file_operations i915_spr_wm_latency_fops = {
4018 	.owner = THIS_MODULE,
4019 	.open = spr_wm_latency_open,
4020 	.read = seq_read,
4021 	.llseek = seq_lseek,
4022 	.release = single_release,
4023 	.write = spr_wm_latency_write
4024 };
4025 
4026 static const struct file_operations i915_cur_wm_latency_fops = {
4027 	.owner = THIS_MODULE,
4028 	.open = cur_wm_latency_open,
4029 	.read = seq_read,
4030 	.llseek = seq_lseek,
4031 	.release = single_release,
4032 	.write = cur_wm_latency_write
4033 };
4034 
4035 static int
4036 i915_wedged_get(void *data, u64 *val)
4037 {
4038 	struct drm_i915_private *dev_priv = data;
4039 
4040 	*val = i915_terminally_wedged(&dev_priv->gpu_error);
4041 
4042 	return 0;
4043 }
4044 
4045 static int
4046 i915_wedged_set(void *data, u64 val)
4047 {
4048 	struct drm_i915_private *i915 = data;
4049 	struct intel_engine_cs *engine;
4050 	unsigned int tmp;
4051 
4052 	/*
4053 	 * There is no safeguard against this debugfs entry colliding
4054 	 * with the hangcheck calling same i915_handle_error() in
4055 	 * parallel, causing an explosion. For now we assume that the
4056 	 * test harness is responsible enough not to inject gpu hangs
4057 	 * while it is writing to 'i915_wedged'
4058 	 */
4059 
4060 	if (i915_reset_backoff(&i915->gpu_error))
4061 		return -EAGAIN;
4062 
4063 	for_each_engine_masked(engine, i915, val, tmp) {
4064 		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4065 		engine->hangcheck.stalled = true;
4066 	}
4067 
4068 	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
4069 			  "Manually set wedged engine mask = %llx", val);
4070 
4071 	wait_on_bit(&i915->gpu_error.flags,
4072 		    I915_RESET_HANDOFF,
4073 		    TASK_UNINTERRUPTIBLE);
4074 
4075 	return 0;
4076 }
4077 
4078 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4079 			i915_wedged_get, i915_wedged_set,
4080 			"%llu\n");
4081 
4082 static int
4083 fault_irq_set(struct drm_i915_private *i915,
4084 	      unsigned long *irq,
4085 	      unsigned long val)
4086 {
4087 	int err;
4088 
4089 	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4090 	if (err)
4091 		return err;
4092 
4093 	err = i915_gem_wait_for_idle(i915,
4094 				     I915_WAIT_LOCKED |
4095 				     I915_WAIT_INTERRUPTIBLE,
4096 				     MAX_SCHEDULE_TIMEOUT);
4097 	if (err)
4098 		goto err_unlock;
4099 
4100 	*irq = val;
4101 	mutex_unlock(&i915->drm.struct_mutex);
4102 
4103 	/* Flush idle worker to disarm irq */
4104 	drain_delayed_work(&i915->gt.idle_work);
4105 
4106 	return 0;
4107 
4108 err_unlock:
4109 	mutex_unlock(&i915->drm.struct_mutex);
4110 	return err;
4111 }
4112 
4113 static int
4114 i915_ring_missed_irq_get(void *data, u64 *val)
4115 {
4116 	struct drm_i915_private *dev_priv = data;
4117 
4118 	*val = dev_priv->gpu_error.missed_irq_rings;
4119 	return 0;
4120 }
4121 
4122 static int
4123 i915_ring_missed_irq_set(void *data, u64 val)
4124 {
4125 	struct drm_i915_private *i915 = data;
4126 
4127 	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
4128 }
4129 
4130 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4131 			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4132 			"0x%08llx\n");
4133 
4134 static int
4135 i915_ring_test_irq_get(void *data, u64 *val)
4136 {
4137 	struct drm_i915_private *dev_priv = data;
4138 
4139 	*val = dev_priv->gpu_error.test_irq_rings;
4140 
4141 	return 0;
4142 }
4143 
4144 static int
4145 i915_ring_test_irq_set(void *data, u64 val)
4146 {
4147 	struct drm_i915_private *i915 = data;
4148 
4149 	/* GuC keeps the user interrupt permanently enabled for submission */
4150 	if (USES_GUC_SUBMISSION(i915))
4151 		return -ENODEV;
4152 
4153 	/*
4154 	 * From icl, we can no longer individually mask interrupt generation
4155 	 * from each engine.
4156 	 */
4157 	if (INTEL_GEN(i915) >= 11)
4158 		return -ENODEV;
4159 
4160 	val &= INTEL_INFO(i915)->ring_mask;
4161 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4162 
4163 	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
4164 }
4165 
4166 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4167 			i915_ring_test_irq_get, i915_ring_test_irq_set,
4168 			"0x%08llx\n");
4169 
4170 #define DROP_UNBOUND	BIT(0)
4171 #define DROP_BOUND	BIT(1)
4172 #define DROP_RETIRE	BIT(2)
4173 #define DROP_ACTIVE	BIT(3)
4174 #define DROP_FREED	BIT(4)
4175 #define DROP_SHRINK_ALL	BIT(5)
4176 #define DROP_IDLE	BIT(6)
4177 #define DROP_RESET_ACTIVE	BIT(7)
4178 #define DROP_RESET_SEQNO	BIT(8)
4179 #define DROP_ALL (DROP_UNBOUND	| \
4180 		  DROP_BOUND	| \
4181 		  DROP_RETIRE	| \
4182 		  DROP_ACTIVE	| \
4183 		  DROP_FREED	| \
4184 		  DROP_SHRINK_ALL |\
4185 		  DROP_IDLE	| \
4186 		  DROP_RESET_ACTIVE | \
4187 		  DROP_RESET_SEQNO)
4188 static int
4189 i915_drop_caches_get(void *data, u64 *val)
4190 {
4191 	*val = DROP_ALL;
4192 
4193 	return 0;
4194 }
4195 
4196 static int
4197 i915_drop_caches_set(void *data, u64 val)
4198 {
4199 	struct drm_i915_private *i915 = data;
4200 	int ret = 0;
4201 
4202 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4203 		  val, val & DROP_ALL);
4204 	intel_runtime_pm_get(i915);
4205 
4206 	if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
4207 		i915_gem_set_wedged(i915);
4208 
4209 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
4210 	 * on ioctls on -EAGAIN. */
4211 	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
4212 		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
4213 		if (ret)
4214 			goto out;
4215 
4216 		if (val & DROP_ACTIVE)
4217 			ret = i915_gem_wait_for_idle(i915,
4218 						     I915_WAIT_INTERRUPTIBLE |
4219 						     I915_WAIT_LOCKED,
4220 						     MAX_SCHEDULE_TIMEOUT);
4221 
4222 		if (ret == 0 && val & DROP_RESET_SEQNO)
4223 			ret = i915_gem_set_global_seqno(&i915->drm, 1);
4224 
4225 		if (val & DROP_RETIRE)
4226 			i915_retire_requests(i915);
4227 
4228 		mutex_unlock(&i915->drm.struct_mutex);
4229 	}
4230 
4231 	if (val & DROP_RESET_ACTIVE &&
4232 	    i915_terminally_wedged(&i915->gpu_error)) {
4233 		i915_handle_error(i915, ALL_ENGINES, 0, NULL);
4234 		wait_on_bit(&i915->gpu_error.flags,
4235 			    I915_RESET_HANDOFF,
4236 			    TASK_UNINTERRUPTIBLE);
4237 	}
4238 
4239 	fs_reclaim_acquire(GFP_KERNEL);
4240 	if (val & DROP_BOUND)
4241 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
4242 
4243 	if (val & DROP_UNBOUND)
4244 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
4245 
4246 	if (val & DROP_SHRINK_ALL)
4247 		i915_gem_shrink_all(i915);
4248 	fs_reclaim_release(GFP_KERNEL);
4249 
4250 	if (val & DROP_IDLE) {
4251 		do {
4252 			if (READ_ONCE(i915->gt.active_requests))
4253 				flush_delayed_work(&i915->gt.retire_work);
4254 			drain_delayed_work(&i915->gt.idle_work);
4255 		} while (READ_ONCE(i915->gt.awake));
4256 	}
4257 
4258 	if (val & DROP_FREED)
4259 		i915_gem_drain_freed_objects(i915);
4260 
4261 out:
4262 	intel_runtime_pm_put(i915);
4263 
4264 	return ret;
4265 }
4266 
4267 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4268 			i915_drop_caches_get, i915_drop_caches_set,
4269 			"0x%08llx\n");
4270 
4271 static int
4272 i915_cache_sharing_get(void *data, u64 *val)
4273 {
4274 	struct drm_i915_private *dev_priv = data;
4275 	u32 snpcr;
4276 
4277 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4278 		return -ENODEV;
4279 
4280 	intel_runtime_pm_get(dev_priv);
4281 
4282 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4283 
4284 	intel_runtime_pm_put(dev_priv);
4285 
4286 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4287 
4288 	return 0;
4289 }
4290 
4291 static int
4292 i915_cache_sharing_set(void *data, u64 val)
4293 {
4294 	struct drm_i915_private *dev_priv = data;
4295 	u32 snpcr;
4296 
4297 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4298 		return -ENODEV;
4299 
4300 	if (val > 3)
4301 		return -EINVAL;
4302 
4303 	intel_runtime_pm_get(dev_priv);
4304 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4305 
4306 	/* Update the cache sharing policy here as well */
4307 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4308 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
4309 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4310 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4311 
4312 	intel_runtime_pm_put(dev_priv);
4313 	return 0;
4314 }
4315 
4316 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4317 			i915_cache_sharing_get, i915_cache_sharing_set,
4318 			"%llu\n");
4319 
4320 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4321 					  struct sseu_dev_info *sseu)
4322 {
4323 #define SS_MAX 2
4324 	const int ss_max = SS_MAX;
4325 	u32 sig1[SS_MAX], sig2[SS_MAX];
4326 	int ss;
4327 
4328 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4329 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4330 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4331 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4332 
4333 	for (ss = 0; ss < ss_max; ss++) {
4334 		unsigned int eu_cnt;
4335 
4336 		if (sig1[ss] & CHV_SS_PG_ENABLE)
4337 			/* skip disabled subslice */
4338 			continue;
4339 
4340 		sseu->slice_mask = BIT(0);
4341 		sseu->subslice_mask[0] |= BIT(ss);
4342 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4343 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4344 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4345 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4346 		sseu->eu_total += eu_cnt;
4347 		sseu->eu_per_subslice = max_t(unsigned int,
4348 					      sseu->eu_per_subslice, eu_cnt);
4349 	}
4350 #undef SS_MAX
4351 }
4352 
4353 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4354 				     struct sseu_dev_info *sseu)
4355 {
4356 #define SS_MAX 6
4357 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4358 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4359 	int s, ss;
4360 
4361 	for (s = 0; s < info->sseu.max_slices; s++) {
4362 		/*
4363 		 * FIXME: Valid SS Mask respects the spec and read
4364 		 * only valid bits for those registers, excluding reserved
4365 		 * although this seems wrong because it would leave many
4366 		 * subslices without ACK.
4367 		 */
4368 		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4369 			GEN10_PGCTL_VALID_SS_MASK(s);
4370 		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4371 		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4372 	}
4373 
4374 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4375 		     GEN9_PGCTL_SSA_EU19_ACK |
4376 		     GEN9_PGCTL_SSA_EU210_ACK |
4377 		     GEN9_PGCTL_SSA_EU311_ACK;
4378 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4379 		     GEN9_PGCTL_SSB_EU19_ACK |
4380 		     GEN9_PGCTL_SSB_EU210_ACK |
4381 		     GEN9_PGCTL_SSB_EU311_ACK;
4382 
4383 	for (s = 0; s < info->sseu.max_slices; s++) {
4384 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4385 			/* skip disabled slice */
4386 			continue;
4387 
4388 		sseu->slice_mask |= BIT(s);
4389 		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
4390 
4391 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4392 			unsigned int eu_cnt;
4393 
4394 			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4395 				/* skip disabled subslice */
4396 				continue;
4397 
4398 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4399 					       eu_mask[ss % 2]);
4400 			sseu->eu_total += eu_cnt;
4401 			sseu->eu_per_subslice = max_t(unsigned int,
4402 						      sseu->eu_per_subslice,
4403 						      eu_cnt);
4404 		}
4405 	}
4406 #undef SS_MAX
4407 }
4408 
4409 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4410 				    struct sseu_dev_info *sseu)
4411 {
4412 #define SS_MAX 3
4413 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4414 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4415 	int s, ss;
4416 
4417 	for (s = 0; s < info->sseu.max_slices; s++) {
4418 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4419 		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4420 		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4421 	}
4422 
4423 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4424 		     GEN9_PGCTL_SSA_EU19_ACK |
4425 		     GEN9_PGCTL_SSA_EU210_ACK |
4426 		     GEN9_PGCTL_SSA_EU311_ACK;
4427 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4428 		     GEN9_PGCTL_SSB_EU19_ACK |
4429 		     GEN9_PGCTL_SSB_EU210_ACK |
4430 		     GEN9_PGCTL_SSB_EU311_ACK;
4431 
4432 	for (s = 0; s < info->sseu.max_slices; s++) {
4433 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4434 			/* skip disabled slice */
4435 			continue;
4436 
4437 		sseu->slice_mask |= BIT(s);
4438 
4439 		if (IS_GEN9_BC(dev_priv))
4440 			sseu->subslice_mask[s] =
4441 				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4442 
4443 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4444 			unsigned int eu_cnt;
4445 
4446 			if (IS_GEN9_LP(dev_priv)) {
4447 				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4448 					/* skip disabled subslice */
4449 					continue;
4450 
4451 				sseu->subslice_mask[s] |= BIT(ss);
4452 			}
4453 
4454 			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4455 					       eu_mask[ss%2]);
4456 			sseu->eu_total += eu_cnt;
4457 			sseu->eu_per_subslice = max_t(unsigned int,
4458 						      sseu->eu_per_subslice,
4459 						      eu_cnt);
4460 		}
4461 	}
4462 #undef SS_MAX
4463 }
4464 
4465 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4466 					 struct sseu_dev_info *sseu)
4467 {
4468 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4469 	int s;
4470 
4471 	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4472 
4473 	if (sseu->slice_mask) {
4474 		sseu->eu_per_subslice =
4475 				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
4476 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4477 			sseu->subslice_mask[s] =
4478 				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4479 		}
4480 		sseu->eu_total = sseu->eu_per_subslice *
4481 				 sseu_subslice_total(sseu);
4482 
4483 		/* subtract fused off EU(s) from enabled slice(s) */
4484 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4485 			u8 subslice_7eu =
4486 				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
4487 
4488 			sseu->eu_total -= hweight8(subslice_7eu);
4489 		}
4490 	}
4491 }
4492 
4493 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4494 				 const struct sseu_dev_info *sseu)
4495 {
4496 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4497 	const char *type = is_available_info ? "Available" : "Enabled";
4498 	int s;
4499 
4500 	seq_printf(m, "  %s Slice Mask: %04x\n", type,
4501 		   sseu->slice_mask);
4502 	seq_printf(m, "  %s Slice Total: %u\n", type,
4503 		   hweight8(sseu->slice_mask));
4504 	seq_printf(m, "  %s Subslice Total: %u\n", type,
4505 		   sseu_subslice_total(sseu));
4506 	for (s = 0; s < fls(sseu->slice_mask); s++) {
4507 		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4508 			   s, hweight8(sseu->subslice_mask[s]));
4509 	}
4510 	seq_printf(m, "  %s EU Total: %u\n", type,
4511 		   sseu->eu_total);
4512 	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4513 		   sseu->eu_per_subslice);
4514 
4515 	if (!is_available_info)
4516 		return;
4517 
4518 	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4519 	if (HAS_POOLED_EU(dev_priv))
4520 		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4521 
4522 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4523 		   yesno(sseu->has_slice_pg));
4524 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4525 		   yesno(sseu->has_subslice_pg));
4526 	seq_printf(m, "  Has EU Power Gating: %s\n",
4527 		   yesno(sseu->has_eu_pg));
4528 }
4529 
4530 static int i915_sseu_status(struct seq_file *m, void *unused)
4531 {
4532 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4533 	struct sseu_dev_info sseu;
4534 
4535 	if (INTEL_GEN(dev_priv) < 8)
4536 		return -ENODEV;
4537 
4538 	seq_puts(m, "SSEU Device Info\n");
4539 	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
4540 
4541 	seq_puts(m, "SSEU Device Status\n");
4542 	memset(&sseu, 0, sizeof(sseu));
4543 	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4544 	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4545 	sseu.max_eus_per_subslice =
4546 		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
4547 
4548 	intel_runtime_pm_get(dev_priv);
4549 
4550 	if (IS_CHERRYVIEW(dev_priv)) {
4551 		cherryview_sseu_device_status(dev_priv, &sseu);
4552 	} else if (IS_BROADWELL(dev_priv)) {
4553 		broadwell_sseu_device_status(dev_priv, &sseu);
4554 	} else if (IS_GEN9(dev_priv)) {
4555 		gen9_sseu_device_status(dev_priv, &sseu);
4556 	} else if (INTEL_GEN(dev_priv) >= 10) {
4557 		gen10_sseu_device_status(dev_priv, &sseu);
4558 	}
4559 
4560 	intel_runtime_pm_put(dev_priv);
4561 
4562 	i915_print_sseu_info(m, false, &sseu);
4563 
4564 	return 0;
4565 }
4566 
4567 static int i915_forcewake_open(struct inode *inode, struct file *file)
4568 {
4569 	struct drm_i915_private *i915 = inode->i_private;
4570 
4571 	if (INTEL_GEN(i915) < 6)
4572 		return 0;
4573 
4574 	intel_runtime_pm_get(i915);
4575 	intel_uncore_forcewake_user_get(i915);
4576 
4577 	return 0;
4578 }
4579 
4580 static int i915_forcewake_release(struct inode *inode, struct file *file)
4581 {
4582 	struct drm_i915_private *i915 = inode->i_private;
4583 
4584 	if (INTEL_GEN(i915) < 6)
4585 		return 0;
4586 
4587 	intel_uncore_forcewake_user_put(i915);
4588 	intel_runtime_pm_put(i915);
4589 
4590 	return 0;
4591 }
4592 
4593 static const struct file_operations i915_forcewake_fops = {
4594 	.owner = THIS_MODULE,
4595 	.open = i915_forcewake_open,
4596 	.release = i915_forcewake_release,
4597 };
4598 
4599 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4600 {
4601 	struct drm_i915_private *dev_priv = m->private;
4602 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4603 
4604 	/* Synchronize with everything first in case there's been an HPD
4605 	 * storm, but we haven't finished handling it in the kernel yet
4606 	 */
4607 	synchronize_irq(dev_priv->drm.irq);
4608 	flush_work(&dev_priv->hotplug.dig_port_work);
4609 	flush_work(&dev_priv->hotplug.hotplug_work);
4610 
4611 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4612 	seq_printf(m, "Detected: %s\n",
4613 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4614 
4615 	return 0;
4616 }
4617 
4618 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4619 					const char __user *ubuf, size_t len,
4620 					loff_t *offp)
4621 {
4622 	struct seq_file *m = file->private_data;
4623 	struct drm_i915_private *dev_priv = m->private;
4624 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4625 	unsigned int new_threshold;
4626 	int i;
4627 	char *newline;
4628 	char tmp[16];
4629 
4630 	if (len >= sizeof(tmp))
4631 		return -EINVAL;
4632 
4633 	if (copy_from_user(tmp, ubuf, len))
4634 		return -EFAULT;
4635 
4636 	tmp[len] = '\0';
4637 
4638 	/* Strip newline, if any */
4639 	newline = strchr(tmp, '\n');
4640 	if (newline)
4641 		*newline = '\0';
4642 
4643 	if (strcmp(tmp, "reset") == 0)
4644 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4645 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4646 		return -EINVAL;
4647 
4648 	if (new_threshold > 0)
4649 		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4650 			      new_threshold);
4651 	else
4652 		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4653 
4654 	spin_lock_irq(&dev_priv->irq_lock);
4655 	hotplug->hpd_storm_threshold = new_threshold;
4656 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4657 	for_each_hpd_pin(i)
4658 		hotplug->stats[i].count = 0;
4659 	spin_unlock_irq(&dev_priv->irq_lock);
4660 
4661 	/* Re-enable hpd immediately if we were in an irq storm */
4662 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4663 
4664 	return len;
4665 }
4666 
4667 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4668 {
4669 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4670 }
4671 
4672 static const struct file_operations i915_hpd_storm_ctl_fops = {
4673 	.owner = THIS_MODULE,
4674 	.open = i915_hpd_storm_ctl_open,
4675 	.read = seq_read,
4676 	.llseek = seq_lseek,
4677 	.release = single_release,
4678 	.write = i915_hpd_storm_ctl_write
4679 };
4680 
4681 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4682 {
4683 	struct drm_i915_private *dev_priv = m->private;
4684 
4685 	seq_printf(m, "Enabled: %s\n",
4686 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4687 
4688 	return 0;
4689 }
4690 
4691 static int
4692 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4693 {
4694 	return single_open(file, i915_hpd_short_storm_ctl_show,
4695 			   inode->i_private);
4696 }
4697 
4698 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4699 					      const char __user *ubuf,
4700 					      size_t len, loff_t *offp)
4701 {
4702 	struct seq_file *m = file->private_data;
4703 	struct drm_i915_private *dev_priv = m->private;
4704 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4705 	char *newline;
4706 	char tmp[16];
4707 	int i;
4708 	bool new_state;
4709 
4710 	if (len >= sizeof(tmp))
4711 		return -EINVAL;
4712 
4713 	if (copy_from_user(tmp, ubuf, len))
4714 		return -EFAULT;
4715 
4716 	tmp[len] = '\0';
4717 
4718 	/* Strip newline, if any */
4719 	newline = strchr(tmp, '\n');
4720 	if (newline)
4721 		*newline = '\0';
4722 
4723 	/* Reset to the "default" state for this system */
4724 	if (strcmp(tmp, "reset") == 0)
4725 		new_state = !HAS_DP_MST(dev_priv);
4726 	else if (kstrtobool(tmp, &new_state) != 0)
4727 		return -EINVAL;
4728 
4729 	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4730 		      new_state ? "En" : "Dis");
4731 
4732 	spin_lock_irq(&dev_priv->irq_lock);
4733 	hotplug->hpd_short_storm_enabled = new_state;
4734 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4735 	for_each_hpd_pin(i)
4736 		hotplug->stats[i].count = 0;
4737 	spin_unlock_irq(&dev_priv->irq_lock);
4738 
4739 	/* Re-enable hpd immediately if we were in an irq storm */
4740 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4741 
4742 	return len;
4743 }
4744 
4745 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4746 	.owner = THIS_MODULE,
4747 	.open = i915_hpd_short_storm_ctl_open,
4748 	.read = seq_read,
4749 	.llseek = seq_lseek,
4750 	.release = single_release,
4751 	.write = i915_hpd_short_storm_ctl_write,
4752 };
4753 
4754 static int i915_drrs_ctl_set(void *data, u64 val)
4755 {
4756 	struct drm_i915_private *dev_priv = data;
4757 	struct drm_device *dev = &dev_priv->drm;
4758 	struct intel_crtc *crtc;
4759 
4760 	if (INTEL_GEN(dev_priv) < 7)
4761 		return -ENODEV;
4762 
4763 	for_each_intel_crtc(dev, crtc) {
4764 		struct drm_connector_list_iter conn_iter;
4765 		struct intel_crtc_state *crtc_state;
4766 		struct drm_connector *connector;
4767 		struct drm_crtc_commit *commit;
4768 		int ret;
4769 
4770 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4771 		if (ret)
4772 			return ret;
4773 
4774 		crtc_state = to_intel_crtc_state(crtc->base.state);
4775 
4776 		if (!crtc_state->base.active ||
4777 		    !crtc_state->has_drrs)
4778 			goto out;
4779 
4780 		commit = crtc_state->base.commit;
4781 		if (commit) {
4782 			ret = wait_for_completion_interruptible(&commit->hw_done);
4783 			if (ret)
4784 				goto out;
4785 		}
4786 
4787 		drm_connector_list_iter_begin(dev, &conn_iter);
4788 		drm_for_each_connector_iter(connector, &conn_iter) {
4789 			struct intel_encoder *encoder;
4790 			struct intel_dp *intel_dp;
4791 
4792 			if (!(crtc_state->base.connector_mask &
4793 			      drm_connector_mask(connector)))
4794 				continue;
4795 
4796 			encoder = intel_attached_encoder(connector);
4797 			if (encoder->type != INTEL_OUTPUT_EDP)
4798 				continue;
4799 
4800 			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4801 						val ? "en" : "dis", val);
4802 
4803 			intel_dp = enc_to_intel_dp(&encoder->base);
4804 			if (val)
4805 				intel_edp_drrs_enable(intel_dp,
4806 						      crtc_state);
4807 			else
4808 				intel_edp_drrs_disable(intel_dp,
4809 						       crtc_state);
4810 		}
4811 		drm_connector_list_iter_end(&conn_iter);
4812 
4813 out:
4814 		drm_modeset_unlock(&crtc->base.mutex);
4815 		if (ret)
4816 			return ret;
4817 	}
4818 
4819 	return 0;
4820 }
4821 
4822 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4823 
4824 static ssize_t
4825 i915_fifo_underrun_reset_write(struct file *filp,
4826 			       const char __user *ubuf,
4827 			       size_t cnt, loff_t *ppos)
4828 {
4829 	struct drm_i915_private *dev_priv = filp->private_data;
4830 	struct intel_crtc *intel_crtc;
4831 	struct drm_device *dev = &dev_priv->drm;
4832 	int ret;
4833 	bool reset;
4834 
4835 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
4836 	if (ret)
4837 		return ret;
4838 
4839 	if (!reset)
4840 		return cnt;
4841 
4842 	for_each_intel_crtc(dev, intel_crtc) {
4843 		struct drm_crtc_commit *commit;
4844 		struct intel_crtc_state *crtc_state;
4845 
4846 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4847 		if (ret)
4848 			return ret;
4849 
4850 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4851 		commit = crtc_state->base.commit;
4852 		if (commit) {
4853 			ret = wait_for_completion_interruptible(&commit->hw_done);
4854 			if (!ret)
4855 				ret = wait_for_completion_interruptible(&commit->flip_done);
4856 		}
4857 
4858 		if (!ret && crtc_state->base.active) {
4859 			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4860 				      pipe_name(intel_crtc->pipe));
4861 
4862 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4863 		}
4864 
4865 		drm_modeset_unlock(&intel_crtc->base.mutex);
4866 
4867 		if (ret)
4868 			return ret;
4869 	}
4870 
4871 	ret = intel_fbc_reset_underrun(dev_priv);
4872 	if (ret)
4873 		return ret;
4874 
4875 	return cnt;
4876 }
4877 
4878 static const struct file_operations i915_fifo_underrun_reset_ops = {
4879 	.owner = THIS_MODULE,
4880 	.open = simple_open,
4881 	.write = i915_fifo_underrun_reset_write,
4882 	.llseek = default_llseek,
4883 };
4884 
4885 static const struct drm_info_list i915_debugfs_list[] = {
4886 	{"i915_capabilities", i915_capabilities, 0},
4887 	{"i915_gem_objects", i915_gem_object_info, 0},
4888 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
4889 	{"i915_gem_stolen", i915_gem_stolen_list_info },
4890 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4891 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4892 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4893 	{"i915_guc_info", i915_guc_info, 0},
4894 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4895 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4896 	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4897 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4898 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4899 	{"i915_frequency_info", i915_frequency_info, 0},
4900 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4901 	{"i915_reset_info", i915_reset_info, 0},
4902 	{"i915_drpc_info", i915_drpc_info, 0},
4903 	{"i915_emon_status", i915_emon_status, 0},
4904 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4905 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4906 	{"i915_fbc_status", i915_fbc_status, 0},
4907 	{"i915_ips_status", i915_ips_status, 0},
4908 	{"i915_sr_status", i915_sr_status, 0},
4909 	{"i915_opregion", i915_opregion, 0},
4910 	{"i915_vbt", i915_vbt, 0},
4911 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4912 	{"i915_context_status", i915_context_status, 0},
4913 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4914 	{"i915_swizzle_info", i915_swizzle_info, 0},
4915 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
4916 	{"i915_llc", i915_llc, 0},
4917 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4918 	{"i915_energy_uJ", i915_energy_uJ, 0},
4919 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4920 	{"i915_power_domain_info", i915_power_domain_info, 0},
4921 	{"i915_dmc_info", i915_dmc_info, 0},
4922 	{"i915_display_info", i915_display_info, 0},
4923 	{"i915_engine_info", i915_engine_info, 0},
4924 	{"i915_rcs_topology", i915_rcs_topology, 0},
4925 	{"i915_shrinker_info", i915_shrinker_info, 0},
4926 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4927 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4928 	{"i915_wa_registers", i915_wa_registers, 0},
4929 	{"i915_ddb_info", i915_ddb_info, 0},
4930 	{"i915_sseu_status", i915_sseu_status, 0},
4931 	{"i915_drrs_status", i915_drrs_status, 0},
4932 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4933 };
4934 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4935 
4936 static const struct i915_debugfs_files {
4937 	const char *name;
4938 	const struct file_operations *fops;
4939 } i915_debugfs_files[] = {
4940 	{"i915_wedged", &i915_wedged_fops},
4941 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4942 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4943 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
4944 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4945 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4946 	{"i915_error_state", &i915_error_state_fops},
4947 	{"i915_gpu_info", &i915_gpu_info_fops},
4948 #endif
4949 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4950 	{"i915_next_seqno", &i915_next_seqno_fops},
4951 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4952 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4953 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4954 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4955 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4956 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4957 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4958 	{"i915_guc_log_level", &i915_guc_log_level_fops},
4959 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
4960 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4961 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
4962 	{"i915_ipc_status", &i915_ipc_status_fops},
4963 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
4964 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4965 };
4966 
4967 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4968 {
4969 	struct drm_minor *minor = dev_priv->drm.primary;
4970 	struct dentry *ent;
4971 	int i;
4972 
4973 	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4974 				  minor->debugfs_root, to_i915(minor->dev),
4975 				  &i915_forcewake_fops);
4976 	if (!ent)
4977 		return -ENOMEM;
4978 
4979 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4980 		ent = debugfs_create_file(i915_debugfs_files[i].name,
4981 					  S_IRUGO | S_IWUSR,
4982 					  minor->debugfs_root,
4983 					  to_i915(minor->dev),
4984 					  i915_debugfs_files[i].fops);
4985 		if (!ent)
4986 			return -ENOMEM;
4987 	}
4988 
4989 	return drm_debugfs_create_files(i915_debugfs_list,
4990 					I915_DEBUGFS_ENTRIES,
4991 					minor->debugfs_root, minor);
4992 }
4993 
4994 struct dpcd_block {
4995 	/* DPCD dump start address. */
4996 	unsigned int offset;
4997 	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4998 	unsigned int end;
4999 	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
5000 	size_t size;
5001 	/* Only valid for eDP. */
5002 	bool edp;
5003 };
5004 
5005 static const struct dpcd_block i915_dpcd_debug[] = {
5006 	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
5007 	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
5008 	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
5009 	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
5010 	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
5011 	{ .offset = DP_SET_POWER },
5012 	{ .offset = DP_EDP_DPCD_REV },
5013 	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
5014 	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
5015 	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
5016 };
5017 
5018 static int i915_dpcd_show(struct seq_file *m, void *data)
5019 {
5020 	struct drm_connector *connector = m->private;
5021 	struct intel_dp *intel_dp =
5022 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5023 	uint8_t buf[16];
5024 	ssize_t err;
5025 	int i;
5026 
5027 	if (connector->status != connector_status_connected)
5028 		return -ENODEV;
5029 
5030 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
5031 		const struct dpcd_block *b = &i915_dpcd_debug[i];
5032 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
5033 
5034 		if (b->edp &&
5035 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
5036 			continue;
5037 
5038 		/* low tech for now */
5039 		if (WARN_ON(size > sizeof(buf)))
5040 			continue;
5041 
5042 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
5043 		if (err < 0)
5044 			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
5045 		else
5046 			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
5047 	}
5048 
5049 	return 0;
5050 }
5051 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
5052 
5053 static int i915_panel_show(struct seq_file *m, void *data)
5054 {
5055 	struct drm_connector *connector = m->private;
5056 	struct intel_dp *intel_dp =
5057 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5058 
5059 	if (connector->status != connector_status_connected)
5060 		return -ENODEV;
5061 
5062 	seq_printf(m, "Panel power up delay: %d\n",
5063 		   intel_dp->panel_power_up_delay);
5064 	seq_printf(m, "Panel power down delay: %d\n",
5065 		   intel_dp->panel_power_down_delay);
5066 	seq_printf(m, "Backlight on delay: %d\n",
5067 		   intel_dp->backlight_on_delay);
5068 	seq_printf(m, "Backlight off delay: %d\n",
5069 		   intel_dp->backlight_off_delay);
5070 
5071 	return 0;
5072 }
5073 DEFINE_SHOW_ATTRIBUTE(i915_panel);
5074 
5075 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
5076 {
5077 	struct drm_connector *connector = m->private;
5078 	struct intel_connector *intel_connector = to_intel_connector(connector);
5079 
5080 	if (connector->status != connector_status_connected)
5081 		return -ENODEV;
5082 
5083 	/* HDCP is supported by connector */
5084 	if (!intel_connector->hdcp.shim)
5085 		return -EINVAL;
5086 
5087 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
5088 		   connector->base.id);
5089 	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
5090 		   "None" : "HDCP1.4");
5091 	seq_puts(m, "\n");
5092 
5093 	return 0;
5094 }
5095 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
5096 
5097 /**
5098  * i915_debugfs_connector_add - add i915 specific connector debugfs files
5099  * @connector: pointer to a registered drm_connector
5100  *
5101  * Cleanup will be done by drm_connector_unregister() through a call to
5102  * drm_debugfs_connector_remove().
5103  *
5104  * Returns 0 on success, negative error codes on error.
5105  */
5106 int i915_debugfs_connector_add(struct drm_connector *connector)
5107 {
5108 	struct dentry *root = connector->debugfs_entry;
5109 
5110 	/* The connector must have been registered beforehands. */
5111 	if (!root)
5112 		return -ENODEV;
5113 
5114 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5115 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5116 		debugfs_create_file("i915_dpcd", S_IRUGO, root,
5117 				    connector, &i915_dpcd_fops);
5118 
5119 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
5120 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
5121 				    connector, &i915_panel_fops);
5122 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
5123 				    connector, &i915_psr_sink_status_fops);
5124 	}
5125 
5126 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5127 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5128 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
5129 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
5130 				    connector, &i915_hdcp_sink_capability_fops);
5131 	}
5132 
5133 	return 0;
5134 }
5135