xref: /openbmc/linux/drivers/gpu/drm/i915/i915_debugfs.c (revision 530e7a660fb795452357b36cce26b839a9a187a9)
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28 
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34 
35 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36 {
37 	return to_i915(node->minor->dev);
38 }
39 
40 static int i915_capabilities(struct seq_file *m, void *data)
41 {
42 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
44 	struct drm_printer p = drm_seq_file_printer(m);
45 
46 	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
47 	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
48 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
49 
50 	intel_device_info_dump_flags(info, &p);
51 	intel_device_info_dump_runtime(info, &p);
52 	intel_driver_caps_print(&dev_priv->caps, &p);
53 
54 	kernel_param_lock(THIS_MODULE);
55 	i915_params_dump(&i915_modparams, &p);
56 	kernel_param_unlock(THIS_MODULE);
57 
58 	return 0;
59 }
60 
61 static char get_active_flag(struct drm_i915_gem_object *obj)
62 {
63 	return i915_gem_object_is_active(obj) ? '*' : ' ';
64 }
65 
66 static char get_pin_flag(struct drm_i915_gem_object *obj)
67 {
68 	return obj->pin_global ? 'p' : ' ';
69 }
70 
71 static char get_tiling_flag(struct drm_i915_gem_object *obj)
72 {
73 	switch (i915_gem_object_get_tiling(obj)) {
74 	default:
75 	case I915_TILING_NONE: return ' ';
76 	case I915_TILING_X: return 'X';
77 	case I915_TILING_Y: return 'Y';
78 	}
79 }
80 
81 static char get_global_flag(struct drm_i915_gem_object *obj)
82 {
83 	return obj->userfault_count ? 'g' : ' ';
84 }
85 
86 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
87 {
88 	return obj->mm.mapping ? 'M' : ' ';
89 }
90 
91 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92 {
93 	u64 size = 0;
94 	struct i915_vma *vma;
95 
96 	for_each_ggtt_vma(vma, obj) {
97 		if (drm_mm_node_allocated(&vma->node))
98 			size += vma->node.size;
99 	}
100 
101 	return size;
102 }
103 
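/*
 * Render a mask of GTT page sizes as text.  Pure single-size masks
 * return a static string; mixed masks are formatted into the caller's
 * buffer (passing a NULL buf degrades mixed masks to a plain "M").
 */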
104 static const char *
105 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106 {
107 	size_t x = 0;
108 
109 	switch (page_sizes) {
110 	case 0:
111 		return "";
112 	case I915_GTT_PAGE_SIZE_4K:
113 		return "4K";
114 	case I915_GTT_PAGE_SIZE_64K:
115 		return "64K";
116 	case I915_GTT_PAGE_SIZE_2M:
117 		return "2M";
118 	default:
119 		if (!buf)
120 			return "M";
121 
122 		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 			x += snprintf(buf + x, len - x, "2M, ");
124 		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 			x += snprintf(buf + x, len - x, "64K, ");
126 		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 			x += snprintf(buf + x, len - x, "4K, ");
 128 		if (x)		/* trim the trailing ", "; x == 0 iff all bits were unknown */
 129 			buf[x - 2] = '\0';
130 		return buf;
131 	}
132 }
133 
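/*
 * One-line summary of an object.  The leading flag characters are:
 * '*' active, 'p' pinned for display, 'X'/'Y' tiled, 'g' has an
 * outstanding GGTT mmap (userfault), 'M' has a kernel mapping.
 */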
134 static void
135 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
136 {
137 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
138 	struct intel_engine_cs *engine;
139 	struct i915_vma *vma;
140 	unsigned int frontbuffer_bits;
141 	int pin_count = 0;
142 
143 	lockdep_assert_held(&obj->base.dev->struct_mutex);
144 
145 	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
146 		   &obj->base,
147 		   get_active_flag(obj),
148 		   get_pin_flag(obj),
149 		   get_tiling_flag(obj),
150 		   get_global_flag(obj),
151 		   get_pin_mapped_flag(obj),
152 		   obj->base.size / 1024,
153 		   obj->read_domains,
154 		   obj->write_domain,
155 		   i915_cache_level_str(dev_priv, obj->cache_level),
156 		   obj->mm.dirty ? " dirty" : "",
157 		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
158 	if (obj->base.name)
159 		seq_printf(m, " (name: %d)", obj->base.name);
160 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
161 		if (i915_vma_is_pinned(vma))
162 			pin_count++;
163 	}
164 	seq_printf(m, " (pinned x %d)", pin_count);
165 	if (obj->pin_global)
 166 		seq_puts(m, " (global)");
167 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
168 		if (!drm_mm_node_allocated(&vma->node))
169 			continue;
170 
171 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
172 			   i915_vma_is_ggtt(vma) ? "g" : "pp",
173 			   vma->node.start, vma->node.size,
174 			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
175 		if (i915_vma_is_ggtt(vma)) {
176 			switch (vma->ggtt_view.type) {
177 			case I915_GGTT_VIEW_NORMAL:
178 				seq_puts(m, ", normal");
179 				break;
180 
181 			case I915_GGTT_VIEW_PARTIAL:
182 				seq_printf(m, ", partial [%08llx+%x]",
183 					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
184 					   vma->ggtt_view.partial.size << PAGE_SHIFT);
185 				break;
186 
187 			case I915_GGTT_VIEW_ROTATED:
188 				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
189 					   vma->ggtt_view.rotated.plane[0].width,
190 					   vma->ggtt_view.rotated.plane[0].height,
191 					   vma->ggtt_view.rotated.plane[0].stride,
192 					   vma->ggtt_view.rotated.plane[0].offset,
193 					   vma->ggtt_view.rotated.plane[1].width,
194 					   vma->ggtt_view.rotated.plane[1].height,
195 					   vma->ggtt_view.rotated.plane[1].stride,
196 					   vma->ggtt_view.rotated.plane[1].offset);
197 				break;
198 
199 			default:
200 				MISSING_CASE(vma->ggtt_view.type);
201 				break;
202 			}
203 		}
204 		if (vma->fence)
 205 			seq_printf(m, ", fence: %d%s",
206 				   vma->fence->id,
207 				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
208 		seq_puts(m, ")");
209 	}
210 	if (obj->stolen)
211 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
212 
213 	engine = i915_gem_object_last_write_engine(obj);
214 	if (engine)
215 		seq_printf(m, " (%s)", engine->name);
216 
217 	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
218 	if (frontbuffer_bits)
219 		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
220 }
221 
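/* sort() comparator: order objects by their start offset within stolen memory. */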
222 static int obj_rank_by_stolen(const void *A, const void *B)
223 {
224 	const struct drm_i915_gem_object *a =
225 		*(const struct drm_i915_gem_object **)A;
226 	const struct drm_i915_gem_object *b =
227 		*(const struct drm_i915_gem_object **)B;
228 
229 	if (a->stolen->start < b->stolen->start)
230 		return -1;
231 	if (a->stolen->start > b->stolen->start)
232 		return 1;
233 	return 0;
234 }
235 
236 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
237 {
238 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
239 	struct drm_device *dev = &dev_priv->drm;
240 	struct drm_i915_gem_object **objects;
241 	struct drm_i915_gem_object *obj;
242 	u64 total_obj_size, total_gtt_size;
243 	unsigned long total, count, n;
244 	int ret;
245 
246 	total = READ_ONCE(dev_priv->mm.object_count);
247 	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
248 	if (!objects)
249 		return -ENOMEM;
250 
251 	ret = mutex_lock_interruptible(&dev->struct_mutex);
252 	if (ret)
253 		goto out;
254 
255 	total_obj_size = total_gtt_size = count = 0;
256 
257 	spin_lock(&dev_priv->mm.obj_lock);
258 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
259 		if (count == total)
260 			break;
261 
262 		if (obj->stolen == NULL)
263 			continue;
264 
265 		objects[count++] = obj;
266 		total_obj_size += obj->base.size;
267 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
268 
269 	}
270 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
271 		if (count == total)
272 			break;
273 
274 		if (obj->stolen == NULL)
275 			continue;
276 
277 		objects[count++] = obj;
278 		total_obj_size += obj->base.size;
279 	}
280 	spin_unlock(&dev_priv->mm.obj_lock);
281 
282 	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
283 
284 	seq_puts(m, "Stolen:\n");
285 	for (n = 0; n < count; n++) {
286 		seq_puts(m, "   ");
287 		describe_obj(m, objects[n]);
288 		seq_putc(m, '\n');
289 	}
290 	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
291 		   count, total_obj_size, total_gtt_size);
292 
293 	mutex_unlock(&dev->struct_mutex);
294 out:
295 	kvfree(objects);
296 	return ret;
297 }
298 
299 struct file_stats {
300 	struct drm_i915_file_private *file_priv;
301 	unsigned long count;
302 	u64 total, unbound;
303 	u64 global, shared;
304 	u64 active, inactive;
305 };
306 
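/*
 * idr_for_each() callback: accumulate one object's size into the
 * per-client totals.  GGTT bindings always count towards "global";
 * ppGTT bindings are skipped unless the vm belongs to this client.
 */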
307 static int per_file_stats(int id, void *ptr, void *data)
308 {
309 	struct drm_i915_gem_object *obj = ptr;
310 	struct file_stats *stats = data;
311 	struct i915_vma *vma;
312 
313 	lockdep_assert_held(&obj->base.dev->struct_mutex);
314 
315 	stats->count++;
316 	stats->total += obj->base.size;
317 	if (!obj->bind_count)
318 		stats->unbound += obj->base.size;
319 	if (obj->base.name || obj->base.dma_buf)
320 		stats->shared += obj->base.size;
321 
322 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
323 		if (!drm_mm_node_allocated(&vma->node))
324 			continue;
325 
326 		if (i915_vma_is_ggtt(vma)) {
327 			stats->global += vma->node.size;
328 		} else {
329 			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
330 
331 			if (ppgtt->vm.file != stats->file_priv)
332 				continue;
333 		}
334 
335 		if (i915_vma_is_active(vma))
336 			stats->active += vma->node.size;
337 		else
338 			stats->inactive += vma->node.size;
339 	}
340 
341 	return 0;
342 }
343 
344 #define print_file_stats(m, name, stats) do { \
345 	if (stats.count) \
346 		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
347 			   name, \
348 			   stats.count, \
349 			   stats.total, \
350 			   stats.active, \
351 			   stats.inactive, \
352 			   stats.global, \
353 			   stats.shared, \
354 			   stats.unbound); \
355 } while (0)
356 
357 static void print_batch_pool_stats(struct seq_file *m,
358 				   struct drm_i915_private *dev_priv)
359 {
360 	struct drm_i915_gem_object *obj;
361 	struct file_stats stats;
362 	struct intel_engine_cs *engine;
363 	enum intel_engine_id id;
364 	int j;
365 
366 	memset(&stats, 0, sizeof(stats));
367 
368 	for_each_engine(engine, dev_priv, id) {
369 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
370 			list_for_each_entry(obj,
371 					    &engine->batch_pool.cache_list[j],
372 					    batch_pool_link)
373 				per_file_stats(0, obj, &stats);
374 		}
375 	}
376 
377 	print_file_stats(m, "[k]batch pool", stats);
378 }
379 
380 static int per_file_ctx_stats(int idx, void *ptr, void *data)
381 {
382 	struct i915_gem_context *ctx = ptr;
383 	struct intel_engine_cs *engine;
384 	enum intel_engine_id id;
385 
386 	for_each_engine(engine, ctx->i915, id) {
387 		struct intel_context *ce = to_intel_context(ctx, engine);
388 
389 		if (ce->state)
390 			per_file_stats(0, ce->state->obj, data);
391 		if (ce->ring)
392 			per_file_stats(0, ce->ring->vma->obj, data);
393 	}
394 
395 	return 0;
396 }
397 
398 static void print_context_stats(struct seq_file *m,
399 				struct drm_i915_private *dev_priv)
400 {
401 	struct drm_device *dev = &dev_priv->drm;
402 	struct file_stats stats;
403 	struct drm_file *file;
404 
405 	memset(&stats, 0, sizeof(stats));
406 
407 	mutex_lock(&dev->struct_mutex);
408 	if (dev_priv->kernel_context)
409 		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
410 
411 	list_for_each_entry(file, &dev->filelist, lhead) {
412 		struct drm_i915_file_private *fpriv = file->driver_priv;
413 		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
414 	}
415 	mutex_unlock(&dev->struct_mutex);
416 
417 	print_file_stats(m, "[k]contexts", stats);
418 }
419 
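/*
 * Overall memory usage: totals over the unbound and bound object lists
 * (under mm.obj_lock), then batch-pool and context objects (under
 * struct_mutex), and finally a per-client breakdown (under
 * filelist_mutex, looking up each client's comm via RCU).
 */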
420 static int i915_gem_object_info(struct seq_file *m, void *data)
421 {
422 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
423 	struct drm_device *dev = &dev_priv->drm;
424 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
425 	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
426 	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
427 	struct drm_i915_gem_object *obj;
428 	unsigned int page_sizes = 0;
429 	struct drm_file *file;
430 	char buf[80];
431 	int ret;
432 
433 	ret = mutex_lock_interruptible(&dev->struct_mutex);
434 	if (ret)
435 		return ret;
436 
437 	seq_printf(m, "%u objects, %llu bytes\n",
438 		   dev_priv->mm.object_count,
439 		   dev_priv->mm.object_memory);
440 
441 	size = count = 0;
442 	mapped_size = mapped_count = 0;
443 	purgeable_size = purgeable_count = 0;
444 	huge_size = huge_count = 0;
445 
446 	spin_lock(&dev_priv->mm.obj_lock);
447 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
448 		size += obj->base.size;
449 		++count;
450 
451 		if (obj->mm.madv == I915_MADV_DONTNEED) {
452 			purgeable_size += obj->base.size;
453 			++purgeable_count;
454 		}
455 
456 		if (obj->mm.mapping) {
457 			mapped_count++;
458 			mapped_size += obj->base.size;
459 		}
460 
461 		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
462 			huge_count++;
463 			huge_size += obj->base.size;
464 			page_sizes |= obj->mm.page_sizes.sg;
465 		}
466 	}
467 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
468 
469 	size = count = dpy_size = dpy_count = 0;
470 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
471 		size += obj->base.size;
472 		++count;
473 
474 		if (obj->pin_global) {
475 			dpy_size += obj->base.size;
476 			++dpy_count;
477 		}
478 
479 		if (obj->mm.madv == I915_MADV_DONTNEED) {
480 			purgeable_size += obj->base.size;
481 			++purgeable_count;
482 		}
483 
484 		if (obj->mm.mapping) {
485 			mapped_count++;
486 			mapped_size += obj->base.size;
487 		}
488 
489 		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
490 			huge_count++;
491 			huge_size += obj->base.size;
492 			page_sizes |= obj->mm.page_sizes.sg;
493 		}
494 	}
495 	spin_unlock(&dev_priv->mm.obj_lock);
496 
497 	seq_printf(m, "%u bound objects, %llu bytes\n",
498 		   count, size);
499 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
500 		   purgeable_count, purgeable_size);
501 	seq_printf(m, "%u mapped objects, %llu bytes\n",
502 		   mapped_count, mapped_size);
503 	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
504 		   huge_count,
505 		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
506 		   huge_size);
507 	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
508 		   dpy_count, dpy_size);
509 
510 	seq_printf(m, "%llu [%pa] gtt total\n",
511 		   ggtt->vm.total, &ggtt->mappable_end);
512 	seq_printf(m, "Supported page sizes: %s\n",
513 		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
514 					buf, sizeof(buf)));
515 
516 	seq_putc(m, '\n');
517 	print_batch_pool_stats(m, dev_priv);
518 	mutex_unlock(&dev->struct_mutex);
519 
520 	mutex_lock(&dev->filelist_mutex);
521 	print_context_stats(m, dev_priv);
522 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
523 		struct file_stats stats;
524 		struct drm_i915_file_private *file_priv = file->driver_priv;
525 		struct i915_request *request;
526 		struct task_struct *task;
527 
528 		mutex_lock(&dev->struct_mutex);
529 
530 		memset(&stats, 0, sizeof(stats));
531 		stats.file_priv = file->driver_priv;
532 		spin_lock(&file->table_lock);
533 		idr_for_each(&file->object_idr, per_file_stats, &stats);
534 		spin_unlock(&file->table_lock);
535 		/*
536 		 * Although we have a valid reference on file->pid, that does
 537 		 * not guarantee that the task that called get_pid() is
538 		 * still alive (e.g. get_pid(current) => fork() => exit()).
539 		 * Therefore, we need to protect this ->comm access using RCU.
540 		 */
541 		request = list_first_entry_or_null(&file_priv->mm.request_list,
542 						   struct i915_request,
543 						   client_link);
544 		rcu_read_lock();
545 		task = pid_task(request && request->gem_context->pid ?
546 				request->gem_context->pid : file->pid,
547 				PIDTYPE_PID);
548 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
549 		rcu_read_unlock();
550 
551 		mutex_unlock(&dev->struct_mutex);
552 	}
553 	mutex_unlock(&dev->filelist_mutex);
554 
555 	return 0;
556 }
557 
558 static int i915_gem_gtt_info(struct seq_file *m, void *data)
559 {
560 	struct drm_info_node *node = m->private;
561 	struct drm_i915_private *dev_priv = node_to_i915(node);
562 	struct drm_device *dev = &dev_priv->drm;
563 	struct drm_i915_gem_object **objects;
564 	struct drm_i915_gem_object *obj;
565 	u64 total_obj_size, total_gtt_size;
566 	unsigned long nobject, n;
567 	int count, ret;
568 
569 	nobject = READ_ONCE(dev_priv->mm.object_count);
570 	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571 	if (!objects)
572 		return -ENOMEM;
573 
574 	ret = mutex_lock_interruptible(&dev->struct_mutex);
575 	if (ret)
 576 		goto out;	/* don't leak the objects array */
577 
578 	count = 0;
579 	spin_lock(&dev_priv->mm.obj_lock);
580 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581 		objects[count++] = obj;
582 		if (count == nobject)
583 			break;
584 	}
585 	spin_unlock(&dev_priv->mm.obj_lock);
586 
587 	total_obj_size = total_gtt_size = 0;
 588 	for (n = 0; n < count; n++) {
589 		obj = objects[n];
590 
591 		seq_puts(m, "   ");
592 		describe_obj(m, obj);
593 		seq_putc(m, '\n');
594 		total_obj_size += obj->base.size;
595 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
596 	}
597 
598 	mutex_unlock(&dev->struct_mutex);
599 
600 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
601 		   count, total_obj_size, total_gtt_size);
 602 out:
 603 	kvfree(objects);
 604 	return ret;
605 }
606 
607 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
608 {
609 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
610 	struct drm_device *dev = &dev_priv->drm;
611 	struct drm_i915_gem_object *obj;
612 	struct intel_engine_cs *engine;
613 	enum intel_engine_id id;
614 	int total = 0;
615 	int ret, j;
616 
617 	ret = mutex_lock_interruptible(&dev->struct_mutex);
618 	if (ret)
619 		return ret;
620 
621 	for_each_engine(engine, dev_priv, id) {
622 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
623 			int count;
624 
625 			count = 0;
626 			list_for_each_entry(obj,
627 					    &engine->batch_pool.cache_list[j],
628 					    batch_pool_link)
629 				count++;
630 			seq_printf(m, "%s cache[%d]: %d objects\n",
631 				   engine->name, j, count);
632 
633 			list_for_each_entry(obj,
634 					    &engine->batch_pool.cache_list[j],
635 					    batch_pool_link) {
636 				seq_puts(m, "   ");
637 				describe_obj(m, obj);
638 				seq_putc(m, '\n');
639 			}
640 
641 			total += count;
642 		}
643 	}
644 
645 	seq_printf(m, "total: %d\n", total);
646 
647 	mutex_unlock(&dev->struct_mutex);
648 
649 	return 0;
650 }
651 
652 static void gen8_display_interrupt_info(struct seq_file *m)
653 {
654 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
655 	int pipe;
656 
657 	for_each_pipe(dev_priv, pipe) {
658 		enum intel_display_power_domain power_domain;
659 
660 		power_domain = POWER_DOMAIN_PIPE(pipe);
661 		if (!intel_display_power_get_if_enabled(dev_priv,
662 							power_domain)) {
663 			seq_printf(m, "Pipe %c power disabled\n",
664 				   pipe_name(pipe));
665 			continue;
666 		}
667 		seq_printf(m, "Pipe %c IMR:\t%08x\n",
668 			   pipe_name(pipe),
669 			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
670 		seq_printf(m, "Pipe %c IIR:\t%08x\n",
671 			   pipe_name(pipe),
672 			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
673 		seq_printf(m, "Pipe %c IER:\t%08x\n",
674 			   pipe_name(pipe),
675 			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
676 
677 		intel_display_power_put(dev_priv, power_domain);
678 	}
679 
680 	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
681 		   I915_READ(GEN8_DE_PORT_IMR));
682 	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
683 		   I915_READ(GEN8_DE_PORT_IIR));
684 	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
685 		   I915_READ(GEN8_DE_PORT_IER));
686 
687 	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
688 		   I915_READ(GEN8_DE_MISC_IMR));
689 	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
690 		   I915_READ(GEN8_DE_MISC_IIR));
691 	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
692 		   I915_READ(GEN8_DE_MISC_IER));
693 
694 	seq_printf(m, "PCU interrupt mask:\t%08x\n",
695 		   I915_READ(GEN8_PCU_IMR));
696 	seq_printf(m, "PCU interrupt identity:\t%08x\n",
697 		   I915_READ(GEN8_PCU_IIR));
698 	seq_printf(m, "PCU interrupt enable:\t%08x\n",
699 		   I915_READ(GEN8_PCU_IER));
700 }
701 
702 static int i915_interrupt_info(struct seq_file *m, void *data)
703 {
704 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
705 	struct intel_engine_cs *engine;
706 	enum intel_engine_id id;
707 	int i, pipe;
708 
709 	intel_runtime_pm_get(dev_priv);
710 
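	/*
	 * The interrupt register layout differs wildly between platforms,
	 * so test for the most specific platform (CHV) first before falling
	 * back through the newer-to-older generic paths.
	 */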
711 	if (IS_CHERRYVIEW(dev_priv)) {
712 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
713 			   I915_READ(GEN8_MASTER_IRQ));
714 
715 		seq_printf(m, "Display IER:\t%08x\n",
716 			   I915_READ(VLV_IER));
717 		seq_printf(m, "Display IIR:\t%08x\n",
718 			   I915_READ(VLV_IIR));
719 		seq_printf(m, "Display IIR_RW:\t%08x\n",
720 			   I915_READ(VLV_IIR_RW));
721 		seq_printf(m, "Display IMR:\t%08x\n",
722 			   I915_READ(VLV_IMR));
723 		for_each_pipe(dev_priv, pipe) {
724 			enum intel_display_power_domain power_domain;
725 
726 			power_domain = POWER_DOMAIN_PIPE(pipe);
727 			if (!intel_display_power_get_if_enabled(dev_priv,
728 								power_domain)) {
729 				seq_printf(m, "Pipe %c power disabled\n",
730 					   pipe_name(pipe));
731 				continue;
732 			}
733 
734 			seq_printf(m, "Pipe %c stat:\t%08x\n",
735 				   pipe_name(pipe),
736 				   I915_READ(PIPESTAT(pipe)));
737 
738 			intel_display_power_put(dev_priv, power_domain);
739 		}
740 
741 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
742 		seq_printf(m, "Port hotplug:\t%08x\n",
743 			   I915_READ(PORT_HOTPLUG_EN));
744 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745 			   I915_READ(VLV_DPFLIPSTAT));
746 		seq_printf(m, "DPINVGTT:\t%08x\n",
747 			   I915_READ(DPINVGTT));
748 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
749 
750 		for (i = 0; i < 4; i++) {
751 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752 				   i, I915_READ(GEN8_GT_IMR(i)));
753 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754 				   i, I915_READ(GEN8_GT_IIR(i)));
755 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756 				   i, I915_READ(GEN8_GT_IER(i)));
757 		}
758 
759 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
760 			   I915_READ(GEN8_PCU_IMR));
761 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
762 			   I915_READ(GEN8_PCU_IIR));
763 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
764 			   I915_READ(GEN8_PCU_IER));
765 	} else if (INTEL_GEN(dev_priv) >= 11) {
766 		seq_printf(m, "Master Interrupt Control:  %08x\n",
767 			   I915_READ(GEN11_GFX_MSTR_IRQ));
768 
769 		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
770 			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771 		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
772 			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773 		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
774 			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775 		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777 		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
778 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779 		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
780 			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781 
782 		seq_printf(m, "Display Interrupt Control:\t%08x\n",
783 			   I915_READ(GEN11_DISPLAY_INT_CTL));
784 
785 		gen8_display_interrupt_info(m);
786 	} else if (INTEL_GEN(dev_priv) >= 8) {
787 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
788 			   I915_READ(GEN8_MASTER_IRQ));
789 
790 		for (i = 0; i < 4; i++) {
791 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792 				   i, I915_READ(GEN8_GT_IMR(i)));
793 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794 				   i, I915_READ(GEN8_GT_IIR(i)));
795 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796 				   i, I915_READ(GEN8_GT_IER(i)));
797 		}
798 
799 		gen8_display_interrupt_info(m);
800 	} else if (IS_VALLEYVIEW(dev_priv)) {
801 		seq_printf(m, "Display IER:\t%08x\n",
802 			   I915_READ(VLV_IER));
803 		seq_printf(m, "Display IIR:\t%08x\n",
804 			   I915_READ(VLV_IIR));
805 		seq_printf(m, "Display IIR_RW:\t%08x\n",
806 			   I915_READ(VLV_IIR_RW));
807 		seq_printf(m, "Display IMR:\t%08x\n",
808 			   I915_READ(VLV_IMR));
809 		for_each_pipe(dev_priv, pipe) {
810 			enum intel_display_power_domain power_domain;
811 
812 			power_domain = POWER_DOMAIN_PIPE(pipe);
813 			if (!intel_display_power_get_if_enabled(dev_priv,
814 								power_domain)) {
815 				seq_printf(m, "Pipe %c power disabled\n",
816 					   pipe_name(pipe));
817 				continue;
818 			}
819 
820 			seq_printf(m, "Pipe %c stat:\t%08x\n",
821 				   pipe_name(pipe),
822 				   I915_READ(PIPESTAT(pipe)));
823 			intel_display_power_put(dev_priv, power_domain);
824 		}
825 
826 		seq_printf(m, "Master IER:\t%08x\n",
827 			   I915_READ(VLV_MASTER_IER));
828 
829 		seq_printf(m, "Render IER:\t%08x\n",
830 			   I915_READ(GTIER));
831 		seq_printf(m, "Render IIR:\t%08x\n",
832 			   I915_READ(GTIIR));
833 		seq_printf(m, "Render IMR:\t%08x\n",
834 			   I915_READ(GTIMR));
835 
836 		seq_printf(m, "PM IER:\t\t%08x\n",
837 			   I915_READ(GEN6_PMIER));
838 		seq_printf(m, "PM IIR:\t\t%08x\n",
839 			   I915_READ(GEN6_PMIIR));
840 		seq_printf(m, "PM IMR:\t\t%08x\n",
841 			   I915_READ(GEN6_PMIMR));
842 
843 		seq_printf(m, "Port hotplug:\t%08x\n",
844 			   I915_READ(PORT_HOTPLUG_EN));
845 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846 			   I915_READ(VLV_DPFLIPSTAT));
847 		seq_printf(m, "DPINVGTT:\t%08x\n",
848 			   I915_READ(DPINVGTT));
849 
850 	} else if (!HAS_PCH_SPLIT(dev_priv)) {
851 		seq_printf(m, "Interrupt enable:    %08x\n",
852 			   I915_READ(IER));
853 		seq_printf(m, "Interrupt identity:  %08x\n",
854 			   I915_READ(IIR));
855 		seq_printf(m, "Interrupt mask:      %08x\n",
856 			   I915_READ(IMR));
857 		for_each_pipe(dev_priv, pipe)
858 			seq_printf(m, "Pipe %c stat:         %08x\n",
859 				   pipe_name(pipe),
860 				   I915_READ(PIPESTAT(pipe)));
861 	} else {
862 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
863 			   I915_READ(DEIER));
864 		seq_printf(m, "North Display Interrupt identity:	%08x\n",
865 			   I915_READ(DEIIR));
866 		seq_printf(m, "North Display Interrupt mask:		%08x\n",
867 			   I915_READ(DEIMR));
868 		seq_printf(m, "South Display Interrupt enable:		%08x\n",
869 			   I915_READ(SDEIER));
870 		seq_printf(m, "South Display Interrupt identity:	%08x\n",
871 			   I915_READ(SDEIIR));
872 		seq_printf(m, "South Display Interrupt mask:		%08x\n",
873 			   I915_READ(SDEIMR));
874 		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
875 			   I915_READ(GTIER));
876 		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
877 			   I915_READ(GTIIR));
878 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
879 			   I915_READ(GTIMR));
880 	}
881 
882 	if (INTEL_GEN(dev_priv) >= 11) {
883 		seq_printf(m, "RCS Intr Mask:\t %08x\n",
884 			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885 		seq_printf(m, "BCS Intr Mask:\t %08x\n",
886 			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887 		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888 			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889 		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890 			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891 		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892 			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893 		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894 			   I915_READ(GEN11_GUC_SG_INTR_MASK));
895 		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897 		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899 		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900 			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901 
902 	} else if (INTEL_GEN(dev_priv) >= 6) {
903 		for_each_engine(engine, dev_priv, id) {
904 			seq_printf(m,
905 				   "Graphics Interrupt mask (%s):	%08x\n",
906 				   engine->name, I915_READ_IMR(engine));
907 		}
908 	}
909 
910 	intel_runtime_pm_put(dev_priv);
911 
912 	return 0;
913 }
914 
915 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916 {
917 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
918 	struct drm_device *dev = &dev_priv->drm;
919 	int i, ret;
920 
921 	ret = mutex_lock_interruptible(&dev->struct_mutex);
922 	if (ret)
923 		return ret;
924 
925 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
927 		struct i915_vma *vma = dev_priv->fence_regs[i].vma;
928 
929 		seq_printf(m, "Fence %d, pin count = %d, object = ",
930 			   i, dev_priv->fence_regs[i].pin_count);
931 		if (!vma)
932 			seq_puts(m, "unused");
933 		else
934 			describe_obj(m, vma->obj);
935 		seq_putc(m, '\n');
936 	}
937 
938 	mutex_unlock(&dev->struct_mutex);
939 	return 0;
940 }
941 
942 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
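/*
 * GPU error-state capture can be configured out; everything below is
 * compiled only with CONFIG_DRM_I915_CAPTURE_ERROR.  Reads re-render
 * the captured state into a window starting at *pos, so the full
 * (potentially huge) dump never has to sit in memory all at once.
 */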
943 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944 			      size_t count, loff_t *pos)
945 {
946 	struct i915_gpu_state *error = file->private_data;
947 	struct drm_i915_error_state_buf str;
948 	ssize_t ret;
949 	loff_t tmp;
950 
951 	if (!error)
952 		return 0;
953 
954 	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
955 	if (ret)
956 		return ret;
957 
958 	ret = i915_error_state_to_str(&str, error);
959 	if (ret)
960 		goto out;
961 
962 	tmp = 0;
963 	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
964 	if (ret < 0)
965 		goto out;
966 
967 	*pos = str.start + ret;
968 out:
969 	i915_error_state_buf_release(&str);
970 	return ret;
971 }
972 
973 static int gpu_state_release(struct inode *inode, struct file *file)
974 {
975 	i915_gpu_state_put(file->private_data);
976 	return 0;
977 }
978 
979 static int i915_gpu_info_open(struct inode *inode, struct file *file)
980 {
981 	struct drm_i915_private *i915 = inode->i_private;
982 	struct i915_gpu_state *gpu;
983 
984 	intel_runtime_pm_get(i915);
985 	gpu = i915_capture_gpu_state(i915);
986 	intel_runtime_pm_put(i915);
987 	if (!gpu)
988 		return -ENOMEM;
989 
990 	file->private_data = gpu;
991 	return 0;
992 }
993 
994 static const struct file_operations i915_gpu_info_fops = {
995 	.owner = THIS_MODULE,
996 	.open = i915_gpu_info_open,
997 	.read = gpu_state_read,
998 	.llseek = default_llseek,
999 	.release = gpu_state_release,
1000 };
1001 
1002 static ssize_t
1003 i915_error_state_write(struct file *filp,
1004 		       const char __user *ubuf,
1005 		       size_t cnt,
1006 		       loff_t *ppos)
1007 {
1008 	struct i915_gpu_state *error = filp->private_data;
1009 
1010 	if (!error)
1011 		return 0;
1012 
1013 	DRM_DEBUG_DRIVER("Resetting error state\n");
1014 	i915_reset_error_state(error->i915);
1015 
1016 	return cnt;
1017 }
1018 
1019 static int i915_error_state_open(struct inode *inode, struct file *file)
1020 {
1021 	file->private_data = i915_first_error_state(inode->i_private);
1022 	return 0;
1023 }
1024 
1025 static const struct file_operations i915_error_state_fops = {
1026 	.owner = THIS_MODULE,
1027 	.open = i915_error_state_open,
1028 	.read = gpu_state_read,
1029 	.write = i915_error_state_write,
1030 	.llseek = default_llseek,
1031 	.release = gpu_state_release,
1032 };
1033 #endif
1034 
1035 static int
1036 i915_next_seqno_set(void *data, u64 val)
1037 {
1038 	struct drm_i915_private *dev_priv = data;
1039 	struct drm_device *dev = &dev_priv->drm;
1040 	int ret;
1041 
1042 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1043 	if (ret)
1044 		return ret;
1045 
1046 	intel_runtime_pm_get(dev_priv);
1047 	ret = i915_gem_set_global_seqno(dev, val);
1048 	intel_runtime_pm_put(dev_priv);
1049 
1050 	mutex_unlock(&dev->struct_mutex);
1051 
1052 	return ret;
1053 }
1054 
1055 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1056 			NULL, i915_next_seqno_set,
1057 			"0x%llx\n");
1058 
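/*
 * RPS/frequency snapshot, with three hardware paths: Ironlake
 * (MEMSWCTL/MEMSTAT), VLV/CHV (punit mailbox) and gen6+ (RPSTAT and
 * friends, which live in the GT power well and so need forcewake).
 */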
1059 static int i915_frequency_info(struct seq_file *m, void *unused)
1060 {
1061 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1062 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1063 	int ret = 0;
1064 
1065 	intel_runtime_pm_get(dev_priv);
1066 
1067 	if (IS_GEN5(dev_priv)) {
1068 		u16 rgvswctl = I915_READ16(MEMSWCTL);
1069 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1070 
1071 		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1072 		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1073 		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1074 			   MEMSTAT_VID_SHIFT);
1075 		seq_printf(m, "Current P-state: %d\n",
1076 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1077 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1078 		u32 rpmodectl, freq_sts;
1079 
1080 		mutex_lock(&dev_priv->pcu_lock);
1081 
1082 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1083 		seq_printf(m, "Video Turbo Mode: %s\n",
1084 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1085 		seq_printf(m, "HW control enabled: %s\n",
1086 			   yesno(rpmodectl & GEN6_RP_ENABLE));
1087 		seq_printf(m, "SW control enabled: %s\n",
1088 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1089 				  GEN6_RP_MEDIA_SW_MODE));
1090 
1091 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1092 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1093 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1094 
1095 		seq_printf(m, "actual GPU freq: %d MHz\n",
1096 			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1097 
1098 		seq_printf(m, "current GPU freq: %d MHz\n",
1099 			   intel_gpu_freq(dev_priv, rps->cur_freq));
1100 
1101 		seq_printf(m, "max GPU freq: %d MHz\n",
1102 			   intel_gpu_freq(dev_priv, rps->max_freq));
1103 
1104 		seq_printf(m, "min GPU freq: %d MHz\n",
1105 			   intel_gpu_freq(dev_priv, rps->min_freq));
1106 
1107 		seq_printf(m, "idle GPU freq: %d MHz\n",
1108 			   intel_gpu_freq(dev_priv, rps->idle_freq));
1109 
1110 		seq_printf(m,
1111 			   "efficient (RPe) frequency: %d MHz\n",
1112 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1113 		mutex_unlock(&dev_priv->pcu_lock);
1114 	} else if (INTEL_GEN(dev_priv) >= 6) {
1115 		u32 rp_state_limits;
1116 		u32 gt_perf_status;
1117 		u32 rp_state_cap;
1118 		u32 rpmodectl, rpinclimit, rpdeclimit;
1119 		u32 rpstat, cagf, reqf;
1120 		u32 rpupei, rpcurup, rpprevup;
1121 		u32 rpdownei, rpcurdown, rpprevdown;
1122 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1123 		int max_freq;
1124 
1125 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1126 		if (IS_GEN9_LP(dev_priv)) {
1127 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1128 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1129 		} else {
1130 			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1131 			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1132 		}
1133 
1134 		/* RPSTAT1 is in the GT power well */
1135 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1136 
1137 		reqf = I915_READ(GEN6_RPNSWREQ);
 1138 		if (INTEL_GEN(dev_priv) >= 9) {
 1139 			reqf >>= 23;
 1140 		} else {
1141 			reqf &= ~GEN6_TURBO_DISABLE;
1142 			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1143 				reqf >>= 24;
1144 			else
1145 				reqf >>= 25;
1146 		}
1147 		reqf = intel_gpu_freq(dev_priv, reqf);
1148 
1149 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1150 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1151 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1152 
1153 		rpstat = I915_READ(GEN6_RPSTAT1);
1154 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1155 		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1156 		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1157 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1158 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1159 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
1160 		cagf = intel_gpu_freq(dev_priv,
1161 				      intel_get_cagf(dev_priv, rpstat));
1162 
1163 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1164 
1165 		if (INTEL_GEN(dev_priv) >= 11) {
1166 			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1167 			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1168 			/*
1169 			 * The equivalent to the PM ISR & IIR cannot be read
1170 			 * without affecting the current state of the system
1171 			 */
1172 			pm_isr = 0;
1173 			pm_iir = 0;
1174 		} else if (INTEL_GEN(dev_priv) >= 8) {
1175 			pm_ier = I915_READ(GEN8_GT_IER(2));
1176 			pm_imr = I915_READ(GEN8_GT_IMR(2));
1177 			pm_isr = I915_READ(GEN8_GT_ISR(2));
1178 			pm_iir = I915_READ(GEN8_GT_IIR(2));
1179 		} else {
1180 			pm_ier = I915_READ(GEN6_PMIER);
1181 			pm_imr = I915_READ(GEN6_PMIMR);
1182 			pm_isr = I915_READ(GEN6_PMISR);
1183 			pm_iir = I915_READ(GEN6_PMIIR);
1184 		}
1185 		pm_mask = I915_READ(GEN6_PMINTRMSK);
1186 
1187 		seq_printf(m, "Video Turbo Mode: %s\n",
1188 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1189 		seq_printf(m, "HW control enabled: %s\n",
1190 			   yesno(rpmodectl & GEN6_RP_ENABLE));
1191 		seq_printf(m, "SW control enabled: %s\n",
1192 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1193 				  GEN6_RP_MEDIA_SW_MODE));
1194 
1195 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1196 			   pm_ier, pm_imr, pm_mask);
1197 		if (INTEL_GEN(dev_priv) <= 10)
1198 			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1199 				   pm_isr, pm_iir);
1200 		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
1201 			   rps->pm_intrmsk_mbz);
1202 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1203 		seq_printf(m, "Render p-state ratio: %d\n",
1204 			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
1205 		seq_printf(m, "Render p-state VID: %d\n",
1206 			   gt_perf_status & 0xff);
1207 		seq_printf(m, "Render p-state limit: %d\n",
1208 			   rp_state_limits & 0xff);
1209 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1210 		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1211 		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1212 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1213 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1214 		seq_printf(m, "CAGF: %dMHz\n", cagf);
1215 		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1216 			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1217 		seq_printf(m, "RP CUR UP: %d (%dus)\n",
1218 			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1219 		seq_printf(m, "RP PREV UP: %d (%dus)\n",
1220 			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
1221 		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
1222 
1223 		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1224 			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1225 		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1226 			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1227 		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1228 			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
1229 		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
1230 
1231 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
1232 			    rp_state_cap >> 16) & 0xff;
1233 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1234 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1235 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1236 			   intel_gpu_freq(dev_priv, max_freq));
1237 
1238 		max_freq = (rp_state_cap & 0xff00) >> 8;
1239 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1240 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1241 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1242 			   intel_gpu_freq(dev_priv, max_freq));
1243 
1244 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
1245 			    rp_state_cap >> 0) & 0xff;
1246 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1247 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1248 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1249 			   intel_gpu_freq(dev_priv, max_freq));
1250 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1251 			   intel_gpu_freq(dev_priv, rps->max_freq));
1252 
1253 		seq_printf(m, "Current freq: %d MHz\n",
1254 			   intel_gpu_freq(dev_priv, rps->cur_freq));
1255 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
1256 		seq_printf(m, "Idle freq: %d MHz\n",
1257 			   intel_gpu_freq(dev_priv, rps->idle_freq));
1258 		seq_printf(m, "Min freq: %d MHz\n",
1259 			   intel_gpu_freq(dev_priv, rps->min_freq));
1260 		seq_printf(m, "Boost freq: %d MHz\n",
1261 			   intel_gpu_freq(dev_priv, rps->boost_freq));
1262 		seq_printf(m, "Max freq: %d MHz\n",
1263 			   intel_gpu_freq(dev_priv, rps->max_freq));
1264 		seq_printf(m,
1265 			   "efficient (RPe) frequency: %d MHz\n",
1266 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1267 	} else {
1268 		seq_puts(m, "no P-state info available\n");
1269 	}
1270 
1271 	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1272 	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1273 	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1274 
1275 	intel_runtime_pm_put(dev_priv);
1276 	return ret;
1277 }
1278 
1279 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1280 			       struct seq_file *m,
1281 			       struct intel_instdone *instdone)
1282 {
1283 	int slice;
1284 	int subslice;
1285 
1286 	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1287 		   instdone->instdone);
1288 
1289 	if (INTEL_GEN(dev_priv) <= 3)
1290 		return;
1291 
1292 	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1293 		   instdone->slice_common);
1294 
1295 	if (INTEL_GEN(dev_priv) <= 6)
1296 		return;
1297 
1298 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1299 		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1300 			   slice, subslice, instdone->sampler[slice][subslice]);
1301 
1302 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1303 		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1304 			   slice, subslice, instdone->row[slice][subslice]);
1305 }
1306 
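/*
 * Hangcheck status: sample ACTHD and the current seqno for each engine
 * under a runtime-pm wakeref, then report per-engine state, including
 * any waiters parked in the breadcrumbs rbtree.
 */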
1307 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1308 {
1309 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1310 	struct intel_engine_cs *engine;
1311 	u64 acthd[I915_NUM_ENGINES];
1312 	u32 seqno[I915_NUM_ENGINES];
1313 	struct intel_instdone instdone;
1314 	enum intel_engine_id id;
1315 
1316 	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1317 		seq_puts(m, "Wedged\n");
1318 	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1319 		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1320 	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
1321 		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
1322 	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
1323 		seq_puts(m, "Waiter holding struct mutex\n");
1324 	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
1325 		seq_puts(m, "struct_mutex blocked for reset\n");
1326 
1327 	if (!i915_modparams.enable_hangcheck) {
1328 		seq_puts(m, "Hangcheck disabled\n");
1329 		return 0;
1330 	}
1331 
1332 	intel_runtime_pm_get(dev_priv);
1333 
1334 	for_each_engine(engine, dev_priv, id) {
1335 		acthd[id] = intel_engine_get_active_head(engine);
1336 		seqno[id] = intel_engine_get_seqno(engine);
1337 	}
1338 
1339 	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
1340 
1341 	intel_runtime_pm_put(dev_priv);
1342 
1343 	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1344 		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1345 			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1346 					    jiffies));
1347 	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1348 		seq_puts(m, "Hangcheck active, work pending\n");
1349 	else
1350 		seq_puts(m, "Hangcheck inactive\n");
1351 
1352 	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1353 
1354 	for_each_engine(engine, dev_priv, id) {
1355 		struct intel_breadcrumbs *b = &engine->breadcrumbs;
1356 		struct rb_node *rb;
1357 
1358 		seq_printf(m, "%s:\n", engine->name);
1359 		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
1360 			   engine->hangcheck.seqno, seqno[id],
1361 			   intel_engine_last_submit(engine));
1362 		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
1363 			   yesno(intel_engine_has_waiter(engine)),
1364 			   yesno(test_bit(engine->id,
1365 					  &dev_priv->gpu_error.missed_irq_rings)),
1366 			   yesno(engine->hangcheck.stalled),
1367 			   yesno(engine->hangcheck.wedged));
1368 
1369 		spin_lock_irq(&b->rb_lock);
1370 		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
1371 			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
1372 
1373 			seq_printf(m, "\t%s [%d] waiting for %x\n",
1374 				   w->tsk->comm, w->tsk->pid, w->seqno);
1375 		}
1376 		spin_unlock_irq(&b->rb_lock);
1377 
1378 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1379 			   (long long)engine->hangcheck.acthd,
1380 			   (long long)acthd[id]);
1381 		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1382 			   hangcheck_action_to_str(engine->hangcheck.action),
1383 			   engine->hangcheck.action,
1384 			   jiffies_to_msecs(jiffies -
1385 					    engine->hangcheck.action_timestamp));
1386 
1387 		if (engine->id == RCS) {
1388 			seq_puts(m, "\tinstdone read =\n");
1389 
1390 			i915_instdone_info(dev_priv, m, &instdone);
1391 
1392 			seq_puts(m, "\tinstdone accu =\n");
1393 
1394 			i915_instdone_info(dev_priv, m,
1395 					   &engine->hangcheck.instdone);
1396 		}
1397 	}
1398 
1399 	return 0;
1400 }
1401 
1402 static int i915_reset_info(struct seq_file *m, void *unused)
1403 {
1404 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1405 	struct i915_gpu_error *error = &dev_priv->gpu_error;
1406 	struct intel_engine_cs *engine;
1407 	enum intel_engine_id id;
1408 
1409 	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1410 
1411 	for_each_engine(engine, dev_priv, id) {
1412 		seq_printf(m, "%s = %u\n", engine->name,
1413 			   i915_reset_engine_count(error, engine));
1414 	}
1415 
1416 	return 0;
1417 }
1418 
1419 static int ironlake_drpc_info(struct seq_file *m)
1420 {
1421 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1422 	u32 rgvmodectl, rstdbyctl;
1423 	u16 crstandvid;
1424 
1425 	rgvmodectl = I915_READ(MEMMODECTL);
1426 	rstdbyctl = I915_READ(RSTDBYCTL);
1427 	crstandvid = I915_READ16(CRSTANDVID);
1428 
1429 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1430 	seq_printf(m, "Boost freq: %d\n",
1431 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1432 		   MEMMODE_BOOST_FREQ_SHIFT);
1433 	seq_printf(m, "HW control enabled: %s\n",
1434 		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1435 	seq_printf(m, "SW control enabled: %s\n",
1436 		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1437 	seq_printf(m, "Gated voltage change: %s\n",
1438 		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1439 	seq_printf(m, "Starting frequency: P%d\n",
1440 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1441 	seq_printf(m, "Max P-state: P%d\n",
1442 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1443 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1444 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1445 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1446 	seq_printf(m, "Render standby enabled: %s\n",
1447 		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1448 	seq_puts(m, "Current RS state: ");
1449 	switch (rstdbyctl & RSX_STATUS_MASK) {
1450 	case RSX_STATUS_ON:
1451 		seq_puts(m, "on\n");
1452 		break;
1453 	case RSX_STATUS_RC1:
1454 		seq_puts(m, "RC1\n");
1455 		break;
1456 	case RSX_STATUS_RC1E:
1457 		seq_puts(m, "RC1E\n");
1458 		break;
1459 	case RSX_STATUS_RS1:
1460 		seq_puts(m, "RS1\n");
1461 		break;
1462 	case RSX_STATUS_RS2:
1463 		seq_puts(m, "RS2 (RC6)\n");
1464 		break;
1465 	case RSX_STATUS_RS3:
 1466 		seq_puts(m, "RS3 (RC6+)\n");
1467 		break;
1468 	default:
1469 		seq_puts(m, "unknown\n");
1470 		break;
1471 	}
1472 
1473 	return 0;
1474 }
1475 
1476 static int i915_forcewake_domains(struct seq_file *m, void *data)
1477 {
1478 	struct drm_i915_private *i915 = node_to_i915(m->private);
1479 	struct intel_uncore_forcewake_domain *fw_domain;
1480 	unsigned int tmp;
1481 
1482 	seq_printf(m, "user.bypass_count = %u\n",
1483 		   i915->uncore.user_forcewake.count);
1484 
1485 	for_each_fw_domain(fw_domain, i915, tmp)
1486 		seq_printf(m, "%s.wake_count = %u\n",
1487 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
1488 			   READ_ONCE(fw_domain->wake_count));
1489 
1490 	return 0;
1491 }
1492 
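/* Print an RC6 residency register both raw and converted to microseconds. */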
1493 static void print_rc6_res(struct seq_file *m,
1494 			  const char *title,
1495 			  const i915_reg_t reg)
1496 {
1497 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1498 
1499 	seq_printf(m, "%s %u (%llu us)\n",
1500 		   title, I915_READ(reg),
1501 		   intel_rc6_residency_us(dev_priv, reg));
1502 }
1503 
1504 static int vlv_drpc_info(struct seq_file *m)
1505 {
1506 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1507 	u32 rcctl1, pw_status;
1508 
1509 	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1510 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1511 
1512 	seq_printf(m, "RC6 Enabled: %s\n",
1513 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1514 					GEN6_RC_CTL_EI_MODE(1))));
1515 	seq_printf(m, "Render Power Well: %s\n",
1516 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1517 	seq_printf(m, "Media Power Well: %s\n",
1518 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1519 
1520 	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1521 	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1522 
1523 	return i915_forcewake_domains(m, NULL);
1524 }
1525 
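/*
 * Note the raw I915_READ_FW() below: the status register is read
 * without taking forcewake, presumably so the query itself does not
 * kick the GT out of the RC state being reported; the mmio tracepoint
 * is then emitted by hand because the _FW accessor skips it.
 */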
1526 static int gen6_drpc_info(struct seq_file *m)
1527 {
1528 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1529 	u32 gt_core_status, rcctl1, rc6vids = 0;
1530 	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1531 
1532 	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1533 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1534 
1535 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1536 	if (INTEL_GEN(dev_priv) >= 9) {
1537 		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1538 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1539 	}
1540 
1541 	if (INTEL_GEN(dev_priv) <= 7) {
1542 		mutex_lock(&dev_priv->pcu_lock);
1543 		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1544 				       &rc6vids);
1545 		mutex_unlock(&dev_priv->pcu_lock);
1546 	}
1547 
1548 	seq_printf(m, "RC1e Enabled: %s\n",
1549 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1550 	seq_printf(m, "RC6 Enabled: %s\n",
1551 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1552 	if (INTEL_GEN(dev_priv) >= 9) {
1553 		seq_printf(m, "Render Well Gating Enabled: %s\n",
1554 			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1555 		seq_printf(m, "Media Well Gating Enabled: %s\n",
1556 			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1557 	}
1558 	seq_printf(m, "Deep RC6 Enabled: %s\n",
1559 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1560 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1561 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1562 	seq_puts(m, "Current RC state: ");
1563 	switch (gt_core_status & GEN6_RCn_MASK) {
1564 	case GEN6_RC0:
1565 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1566 			seq_puts(m, "Core Power Down\n");
1567 		else
1568 			seq_puts(m, "on\n");
1569 		break;
1570 	case GEN6_RC3:
1571 		seq_puts(m, "RC3\n");
1572 		break;
1573 	case GEN6_RC6:
1574 		seq_puts(m, "RC6\n");
1575 		break;
1576 	case GEN6_RC7:
1577 		seq_puts(m, "RC7\n");
1578 		break;
1579 	default:
1580 		seq_puts(m, "Unknown\n");
1581 		break;
1582 	}
1583 
1584 	seq_printf(m, "Core Power Down: %s\n",
1585 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1586 	if (INTEL_GEN(dev_priv) >= 9) {
1587 		seq_printf(m, "Render Power Well: %s\n",
1588 			(gen9_powergate_status &
1589 			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1590 		seq_printf(m, "Media Power Well: %s\n",
1591 			(gen9_powergate_status &
1592 			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1593 	}
1594 
1595 	/* Not exactly sure what this is */
1596 	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1597 		      GEN6_GT_GFX_RC6_LOCKED);
1598 	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1599 	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1600 	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1601 
1602 	if (INTEL_GEN(dev_priv) <= 7) {
1603 		seq_printf(m, "RC6   voltage: %dmV\n",
1604 			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1605 		seq_printf(m, "RC6+  voltage: %dmV\n",
1606 			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1607 		seq_printf(m, "RC6++ voltage: %dmV\n",
1608 			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1609 	}
1610 
1611 	return i915_forcewake_domains(m, NULL);
1612 }
1613 
1614 static int i915_drpc_info(struct seq_file *m, void *unused)
1615 {
1616 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1617 	int err;
1618 
1619 	intel_runtime_pm_get(dev_priv);
1620 
1621 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1622 		err = vlv_drpc_info(m);
1623 	else if (INTEL_GEN(dev_priv) >= 6)
1624 		err = gen6_drpc_info(m);
1625 	else
1626 		err = ironlake_drpc_info(m);
1627 
1628 	intel_runtime_pm_put(dev_priv);
1629 
1630 	return err;
1631 }
1632 
1633 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1634 {
1635 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1636 
1637 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1638 		   dev_priv->fb_tracking.busy_bits);
1639 
1640 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1641 		   dev_priv->fb_tracking.flip_bits);
1642 
1643 	return 0;
1644 }
1645 
1646 static int i915_fbc_status(struct seq_file *m, void *unused)
1647 {
1648 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1649 	struct intel_fbc *fbc = &dev_priv->fbc;
1650 
1651 	if (!HAS_FBC(dev_priv))
1652 		return -ENODEV;
1653 
1654 	intel_runtime_pm_get(dev_priv);
1655 	mutex_lock(&fbc->lock);
1656 
1657 	if (intel_fbc_is_active(dev_priv))
1658 		seq_puts(m, "FBC enabled\n");
1659 	else
1660 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1661 
1662 	if (fbc->work.scheduled)
1663 		seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
1664 			   fbc->work.scheduled_vblank,
1665 			   drm_crtc_vblank_count(&fbc->crtc->base));
1666 
1667 	if (intel_fbc_is_active(dev_priv)) {
1668 		u32 mask;
1669 
1670 		if (INTEL_GEN(dev_priv) >= 8)
1671 			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1672 		else if (INTEL_GEN(dev_priv) >= 7)
1673 			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1674 		else if (INTEL_GEN(dev_priv) >= 5)
1675 			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1676 		else if (IS_G4X(dev_priv))
1677 			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1678 		else
1679 			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1680 							FBC_STAT_COMPRESSED);
1681 
1682 		seq_printf(m, "Compressing: %s\n", yesno(mask));
1683 	}
1684 
1685 	mutex_unlock(&fbc->lock);
1686 	intel_runtime_pm_put(dev_priv);
1687 
1688 	return 0;
1689 }
1690 
1691 static int i915_fbc_false_color_get(void *data, u64 *val)
1692 {
1693 	struct drm_i915_private *dev_priv = data;
1694 
1695 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1696 		return -ENODEV;
1697 
1698 	*val = dev_priv->fbc.false_color;
1699 
1700 	return 0;
1701 }
1702 
1703 static int i915_fbc_false_color_set(void *data, u64 val)
1704 {
1705 	struct drm_i915_private *dev_priv = data;
1706 	u32 reg;
1707 
1708 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1709 		return -ENODEV;
1710 
1711 	mutex_lock(&dev_priv->fbc.lock);
1712 
1713 	reg = I915_READ(ILK_DPFC_CONTROL);
1714 	dev_priv->fbc.false_color = val;
1715 
1716 	I915_WRITE(ILK_DPFC_CONTROL, val ?
1717 		   (reg | FBC_CTL_FALSE_COLOR) :
1718 		   (reg & ~FBC_CTL_FALSE_COLOR));
1719 
1720 	mutex_unlock(&dev_priv->fbc.lock);
1721 	return 0;
1722 }
1723 
1724 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1725 			i915_fbc_false_color_get, i915_fbc_false_color_set,
1726 			"%llu\n");
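/*
 * Usage sketch from userspace (assuming this attribute is registered as
 * "i915_fbc_false_color" under the card's debugfs directory):
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fbc_false_color
 */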
1727 
1728 static int i915_ips_status(struct seq_file *m, void *unused)
1729 {
1730 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1731 
1732 	if (!HAS_IPS(dev_priv))
1733 		return -ENODEV;
1734 
1735 	intel_runtime_pm_get(dev_priv);
1736 
1737 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1738 		   yesno(i915_modparams.enable_ips));
1739 
1740 	if (INTEL_GEN(dev_priv) >= 8) {
1741 		seq_puts(m, "Currently: unknown\n");
1742 	} else {
1743 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1744 			seq_puts(m, "Currently: enabled\n");
1745 		else
1746 			seq_puts(m, "Currently: disabled\n");
1747 	}
1748 
1749 	intel_runtime_pm_put(dev_priv);
1750 
1751 	return 0;
1752 }
1753 
1754 static int i915_sr_status(struct seq_file *m, void *unused)
1755 {
1756 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1757 	bool sr_enabled = false;
1758 
1759 	intel_runtime_pm_get(dev_priv);
1760 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1761 
1762 	if (INTEL_GEN(dev_priv) >= 9)
1763 		/* no global SR status; inspect per-plane WM */;
1764 	else if (HAS_PCH_SPLIT(dev_priv))
1765 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1766 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1767 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1768 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1769 	else if (IS_I915GM(dev_priv))
1770 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1771 	else if (IS_PINEVIEW(dev_priv))
1772 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1773 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1774 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1775 
1776 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1777 	intel_runtime_pm_put(dev_priv);
1778 
1779 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1780 
1781 	return 0;
1782 }
1783 
1784 static int i915_emon_status(struct seq_file *m, void *unused)
1785 {
1786 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1787 	struct drm_device *dev = &dev_priv->drm;
1788 	unsigned long temp, chipset, gfx;
1789 	int ret;
1790 
1791 	if (!IS_GEN5(dev_priv))
1792 		return -ENODEV;
1793 
1794 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1795 	if (ret)
1796 		return ret;
1797 
1798 	temp = i915_mch_val(dev_priv);
1799 	chipset = i915_chipset_val(dev_priv);
1800 	gfx = i915_gfx_val(dev_priv);
1801 	mutex_unlock(&dev->struct_mutex);
1802 
1803 	seq_printf(m, "GMCH temp: %ld\n", temp);
1804 	seq_printf(m, "Chipset power: %ld\n", chipset);
1805 	seq_printf(m, "GFX power: %ld\n", gfx);
1806 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1807 
1808 	return 0;
1809 }
1810 
1811 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1812 {
1813 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1814 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1815 	unsigned int max_gpu_freq, min_gpu_freq;
1816 	int gpu_freq, ia_freq;
1817 	int ret;
1818 
1819 	if (!HAS_LLC(dev_priv))
1820 		return -ENODEV;
1821 
1822 	intel_runtime_pm_get(dev_priv);
1823 
1824 	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
1825 	if (ret)
1826 		goto out;
1827 
1828 	min_gpu_freq = rps->min_freq;
1829 	max_gpu_freq = rps->max_freq;
1830 	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1831 		/* Convert GT frequency to 50 MHz units */
1832 		min_gpu_freq /= GEN9_FREQ_SCALER;
1833 		max_gpu_freq /= GEN9_FREQ_SCALER;
1834 	}
1835 
1836 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1837 
1838 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1839 		ia_freq = gpu_freq;
1840 		sandybridge_pcode_read(dev_priv,
1841 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1842 				       &ia_freq);
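		/*
		 * ia_freq comes back with two packed fields, decoded below:
		 * bits 7:0 give the effective IA frequency and bits 15:8 the
		 * effective ring frequency, both in 100 MHz units.
		 */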
1843 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1844 			   intel_gpu_freq(dev_priv, (gpu_freq *
1845 						     (IS_GEN9_BC(dev_priv) ||
1846 						      INTEL_GEN(dev_priv) >= 10 ?
1847 						      GEN9_FREQ_SCALER : 1))),
1848 			   ((ia_freq >> 0) & 0xff) * 100,
1849 			   ((ia_freq >> 8) & 0xff) * 100);
1850 	}
1851 
1852 	mutex_unlock(&dev_priv->pcu_lock);
1853 
1854 out:
1855 	intel_runtime_pm_put(dev_priv);
1856 	return ret;
1857 }
1858 
1859 static int i915_opregion(struct seq_file *m, void *unused)
1860 {
1861 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1862 	struct drm_device *dev = &dev_priv->drm;
1863 	struct intel_opregion *opregion = &dev_priv->opregion;
1864 	int ret;
1865 
1866 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1867 	if (ret)
1868 		goto out;
1869 
1870 	if (opregion->header)
1871 		seq_write(m, opregion->header, OPREGION_SIZE);
1872 
1873 	mutex_unlock(&dev->struct_mutex);
1874 
1875 out:
1876 	return ret;
1877 }
1878 
1879 static int i915_vbt(struct seq_file *m, void *unused)
1880 {
1881 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1882 
1883 	if (opregion->vbt)
1884 		seq_write(m, opregion->vbt, opregion->vbt_size);
1885 
1886 	return 0;
1887 }
1888 
1889 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1890 {
1891 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1892 	struct drm_device *dev = &dev_priv->drm;
1893 	struct intel_framebuffer *fbdev_fb = NULL;
1894 	struct drm_framebuffer *drm_fb;
1895 	int ret;
1896 
1897 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1898 	if (ret)
1899 		return ret;
1900 
1901 #ifdef CONFIG_DRM_FBDEV_EMULATION
1902 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1903 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1904 
1905 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1906 			   fbdev_fb->base.width,
1907 			   fbdev_fb->base.height,
1908 			   fbdev_fb->base.format->depth,
1909 			   fbdev_fb->base.format->cpp[0] * 8,
1910 			   fbdev_fb->base.modifier,
1911 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1912 		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1913 		seq_putc(m, '\n');
1914 	}
1915 #endif
1916 
1917 	mutex_lock(&dev->mode_config.fb_lock);
1918 	drm_for_each_fb(drm_fb, dev) {
1919 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1920 		if (fb == fbdev_fb)
1921 			continue;
1922 
1923 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1924 			   fb->base.width,
1925 			   fb->base.height,
1926 			   fb->base.format->depth,
1927 			   fb->base.format->cpp[0] * 8,
1928 			   fb->base.modifier,
1929 			   drm_framebuffer_read_refcount(&fb->base));
1930 		describe_obj(m, intel_fb_obj(&fb->base));
1931 		seq_putc(m, '\n');
1932 	}
1933 	mutex_unlock(&dev->mode_config.fb_lock);
1934 	mutex_unlock(&dev->struct_mutex);
1935 
1936 	return 0;
1937 }
1938 
1939 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1940 {
1941 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1942 		   ring->space, ring->head, ring->tail, ring->emit);
1943 }
1944 
1945 static int i915_context_status(struct seq_file *m, void *unused)
1946 {
1947 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1948 	struct drm_device *dev = &dev_priv->drm;
1949 	struct intel_engine_cs *engine;
1950 	struct i915_gem_context *ctx;
1951 	enum intel_engine_id id;
1952 	int ret;
1953 
1954 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1955 	if (ret)
1956 		return ret;
1957 
1958 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1959 		seq_printf(m, "HW context %u ", ctx->hw_id);
1960 		if (ctx->pid) {
1961 			struct task_struct *task;
1962 
1963 			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1964 			if (task) {
1965 				seq_printf(m, "(%s [%d]) ",
1966 					   task->comm, task->pid);
1967 				put_task_struct(task);
1968 			}
1969 		} else if (IS_ERR(ctx->file_priv)) {
1970 			seq_puts(m, "(deleted) ");
1971 		} else {
1972 			seq_puts(m, "(kernel) ");
1973 		}
1974 
1975 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1976 		seq_putc(m, '\n');
1977 
1978 		for_each_engine(engine, dev_priv, id) {
1979 			struct intel_context *ce =
1980 				to_intel_context(ctx, engine);
1981 
1982 			seq_printf(m, "%s: ", engine->name);
1983 			if (ce->state)
1984 				describe_obj(m, ce->state->obj);
1985 			if (ce->ring)
1986 				describe_ctx_ring(m, ce->ring);
1987 			seq_putc(m, '\n');
1988 		}
1989 
1990 		seq_putc(m, '\n');
1991 	}
1992 
1993 	mutex_unlock(&dev->struct_mutex);
1994 
1995 	return 0;
1996 }
1997 
1998 static const char *swizzle_string(unsigned swizzle)
1999 {
2000 	switch (swizzle) {
2001 	case I915_BIT_6_SWIZZLE_NONE:
2002 		return "none";
2003 	case I915_BIT_6_SWIZZLE_9:
2004 		return "bit9";
2005 	case I915_BIT_6_SWIZZLE_9_10:
2006 		return "bit9/bit10";
2007 	case I915_BIT_6_SWIZZLE_9_11:
2008 		return "bit9/bit11";
2009 	case I915_BIT_6_SWIZZLE_9_10_11:
2010 		return "bit9/bit10/bit11";
2011 	case I915_BIT_6_SWIZZLE_9_17:
2012 		return "bit9/bit17";
2013 	case I915_BIT_6_SWIZZLE_9_10_17:
2014 		return "bit9/bit10/bit17";
2015 	case I915_BIT_6_SWIZZLE_UNKNOWN:
2016 		return "unknown";
2017 	}
2018 
2019 	return "bug";
2020 }
2021 
2022 static int i915_swizzle_info(struct seq_file *m, void *data)
2023 {
2024 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2025 
2026 	intel_runtime_pm_get(dev_priv);
2027 
2028 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2029 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2030 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2031 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2032 
2033 	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
2034 		seq_printf(m, "DCC = 0x%08x\n",
2035 			   I915_READ(DCC));
2036 		seq_printf(m, "DCC2 = 0x%08x\n",
2037 			   I915_READ(DCC2));
2038 		seq_printf(m, "C0DRB3 = 0x%04x\n",
2039 			   I915_READ16(C0DRB3));
2040 		seq_printf(m, "C1DRB3 = 0x%04x\n",
2041 			   I915_READ16(C1DRB3));
2042 	} else if (INTEL_GEN(dev_priv) >= 6) {
2043 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2044 			   I915_READ(MAD_DIMM_C0));
2045 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2046 			   I915_READ(MAD_DIMM_C1));
2047 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2048 			   I915_READ(MAD_DIMM_C2));
2049 		seq_printf(m, "TILECTL = 0x%08x\n",
2050 			   I915_READ(TILECTL));
2051 		if (INTEL_GEN(dev_priv) >= 8)
2052 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2053 				   I915_READ(GAMTARBMODE));
2054 		else
2055 			seq_printf(m, "ARB_MODE = 0x%08x\n",
2056 				   I915_READ(ARB_MODE));
2057 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2058 			   I915_READ(DISP_ARB_CTL));
2059 	}
2060 
2061 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2062 		seq_puts(m, "L-shaped memory detected\n");
2063 
2064 	intel_runtime_pm_put(dev_priv);
2065 
2066 	return 0;
2067 }
2068 
2069 static int per_file_ctx(int id, void *ptr, void *data)
2070 {
2071 	struct i915_gem_context *ctx = ptr;
2072 	struct seq_file *m = data;
2073 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2074 
2075 	if (!ppgtt) {
2076 		seq_printf(m, "  no ppgtt for context %d\n",
2077 			   ctx->user_handle);
2078 		return 0;
2079 	}
2080 
2081 	if (i915_gem_context_is_default(ctx))
2082 		seq_puts(m, "  default context:\n");
2083 	else
2084 		seq_printf(m, "  context %d:\n", ctx->user_handle);
2085 	ppgtt->debug_dump(ppgtt, m);
2086 
2087 	return 0;
2088 }
2089 
2090 static void gen8_ppgtt_info(struct seq_file *m,
2091 			    struct drm_i915_private *dev_priv)
2092 {
2093 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2094 	struct intel_engine_cs *engine;
2095 	enum intel_engine_id id;
2096 	int i;
2097 
2098 	if (!ppgtt)
2099 		return;
2100 
2101 	for_each_engine(engine, dev_priv, id) {
2102 		seq_printf(m, "%s\n", engine->name);
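		/*
		 * Each engine exposes four page-directory-pointer registers,
		 * split into upper/lower dwords; recombine them into 64-bit
		 * values for dumping.
		 */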
2103 		for (i = 0; i < 4; i++) {
2104 			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2105 			pdp <<= 32;
2106 			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2107 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2108 		}
2109 	}
2110 }
2111 
2112 static void gen6_ppgtt_info(struct seq_file *m,
2113 			    struct drm_i915_private *dev_priv)
2114 {
2115 	struct intel_engine_cs *engine;
2116 	enum intel_engine_id id;
2117 
2118 	if (IS_GEN6(dev_priv))
2119 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2120 
2121 	for_each_engine(engine, dev_priv, id) {
2122 		seq_printf(m, "%s\n", engine->name);
2123 		if (IS_GEN7(dev_priv))
2124 			seq_printf(m, "GFX_MODE: 0x%08x\n",
2125 				   I915_READ(RING_MODE_GEN7(engine)));
2126 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2127 			   I915_READ(RING_PP_DIR_BASE(engine)));
2128 		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2129 			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
2130 		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2131 			   I915_READ(RING_PP_DIR_DCLV(engine)));
2132 	}
2133 	if (dev_priv->mm.aliasing_ppgtt) {
2134 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2135 
2136 		seq_puts(m, "aliasing PPGTT:\n");
2137 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2138 
2139 		ppgtt->debug_dump(ppgtt, m);
2140 	}
2141 
2142 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2143 }
2144 
2145 static int i915_ppgtt_info(struct seq_file *m, void *data)
2146 {
2147 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2148 	struct drm_device *dev = &dev_priv->drm;
2149 	struct drm_file *file;
2150 	int ret;
2151 
2152 	mutex_lock(&dev->filelist_mutex);
2153 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2154 	if (ret)
2155 		goto out_unlock;
2156 
2157 	intel_runtime_pm_get(dev_priv);
2158 
2159 	if (INTEL_GEN(dev_priv) >= 8)
2160 		gen8_ppgtt_info(m, dev_priv);
2161 	else if (INTEL_GEN(dev_priv) >= 6)
2162 		gen6_ppgtt_info(m, dev_priv);
2163 
2164 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2165 		struct drm_i915_file_private *file_priv = file->driver_priv;
2166 		struct task_struct *task;
2167 
2168 		task = get_pid_task(file->pid, PIDTYPE_PID);
2169 		if (!task) {
2170 			ret = -ESRCH;
2171 			goto out_rpm;
2172 		}
2173 		seq_printf(m, "\nproc: %s\n", task->comm);
2174 		put_task_struct(task);
2175 		idr_for_each(&file_priv->context_idr, per_file_ctx,
2176 			     (void *)(unsigned long)m);
2177 	}
2178 
2179 out_rpm:
2180 	intel_runtime_pm_put(dev_priv);
2181 	mutex_unlock(&dev->struct_mutex);
2182 out_unlock:
2183 	mutex_unlock(&dev->filelist_mutex);
2184 	return ret;
2185 }
2186 
2187 static int count_irq_waiters(struct drm_i915_private *i915)
2188 {
2189 	struct intel_engine_cs *engine;
2190 	enum intel_engine_id id;
2191 	int count = 0;
2192 
2193 	for_each_engine(engine, i915, id)
2194 		count += intel_engine_has_waiter(engine);
2195 
2196 	return count;
2197 }
2198 
2199 static const char *rps_power_to_str(unsigned int power)
2200 {
2201 	static const char * const strings[] = {
2202 		[LOW_POWER] = "low power",
2203 		[BETWEEN] = "mixed",
2204 		[HIGH_POWER] = "high power",
2205 	};
2206 
2207 	if (power >= ARRAY_SIZE(strings) || !strings[power])
2208 		return "unknown";
2209 
2210 	return strings[power];
2211 }
2212 
2213 static int i915_rps_boost_info(struct seq_file *m, void *data)
2214 {
2215 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2216 	struct drm_device *dev = &dev_priv->drm;
2217 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
2218 	struct drm_file *file;
2219 
2220 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2221 	seq_printf(m, "GPU busy? %s [%d requests]\n",
2222 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2223 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2224 	seq_printf(m, "Boosts outstanding? %d\n",
2225 		   atomic_read(&rps->num_waiters));
2226 	seq_printf(m, "Frequency requested %d\n",
2227 		   intel_gpu_freq(dev_priv, rps->cur_freq));
2228 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2229 		   intel_gpu_freq(dev_priv, rps->min_freq),
2230 		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2231 		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2232 		   intel_gpu_freq(dev_priv, rps->max_freq));
2233 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
2234 		   intel_gpu_freq(dev_priv, rps->idle_freq),
2235 		   intel_gpu_freq(dev_priv, rps->efficient_freq),
2236 		   intel_gpu_freq(dev_priv, rps->boost_freq));
2237 
2238 	mutex_lock(&dev->filelist_mutex);
2239 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2240 		struct drm_i915_file_private *file_priv = file->driver_priv;
2241 		struct task_struct *task;
2242 
2243 		rcu_read_lock();
2244 		task = pid_task(file->pid, PIDTYPE_PID);
2245 		seq_printf(m, "%s [%d]: %d boosts\n",
2246 			   task ? task->comm : "<unknown>",
2247 			   task ? task->pid : -1,
2248 			   atomic_read(&file_priv->rps_client.boosts));
2249 		rcu_read_unlock();
2250 	}
2251 	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2252 		   atomic_read(&rps->boosts));
2253 	mutex_unlock(&dev->filelist_mutex);
2254 
2255 	if (INTEL_GEN(dev_priv) >= 6 &&
2256 	    rps->enabled &&
2257 	    dev_priv->gt.active_requests) {
2258 		u32 rpup, rpupei;
2259 		u32 rpdown, rpdownei;
2260 
2261 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2262 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2263 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2264 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2265 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2266 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2267 
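		/*
		 * rpup/rpupei (and the down counterparts) appear to be
		 * busy/total cycle counts over the current evaluation
		 * interval, so 100 * rpup / rpupei is the average load in
		 * percent.
		 */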
2268 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2269 			   rps_power_to_str(rps->power));
2270 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
2271 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
2272 			   rps->up_threshold);
2273 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
2274 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2275 			   rps->down_threshold);
2276 	} else {
2277 		seq_puts(m, "\nRPS Autotuning inactive\n");
2278 	}
2279 
2280 	return 0;
2281 }
2282 
2283 static int i915_llc(struct seq_file *m, void *data)
2284 {
2285 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2286 	const bool edram = INTEL_GEN(dev_priv) > 8;
2287 
2288 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2289 	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2290 		   intel_uncore_edram_size(dev_priv) / 1024 / 1024);
2291 
2292 	return 0;
2293 }
2294 
2295 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2296 {
2297 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2298 	struct drm_printer p;
2299 
2300 	if (!HAS_HUC(dev_priv))
2301 		return -ENODEV;
2302 
2303 	p = drm_seq_file_printer(m);
2304 	intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2305 
2306 	intel_runtime_pm_get(dev_priv);
2307 	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2308 	intel_runtime_pm_put(dev_priv);
2309 
2310 	return 0;
2311 }
2312 
2313 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2314 {
2315 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2316 	struct drm_printer p;
2317 	u32 tmp, i;
2318 
2319 	if (!HAS_GUC(dev_priv))
2320 		return -ENODEV;
2321 
2322 	p = drm_seq_file_printer(m);
2323 	intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2324 
2325 	intel_runtime_pm_get(dev_priv);
2326 
2327 	tmp = I915_READ(GUC_STATUS);
2328 
2329 	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2330 	seq_printf(m, "\tBootrom status = 0x%x\n",
2331 		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2332 	seq_printf(m, "\tuKernel status = 0x%x\n",
2333 		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2334 	seq_printf(m, "\tMIA Core status = 0x%x\n",
2335 		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2336 	seq_puts(m, "\nScratch registers:\n");
2337 	for (i = 0; i < 16; i++)
2338 		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2339 
2340 	intel_runtime_pm_put(dev_priv);
2341 
2342 	return 0;
2343 }
2344 
2345 static const char *
2346 stringify_guc_log_type(enum guc_log_buffer_type type)
2347 {
2348 	switch (type) {
2349 	case GUC_ISR_LOG_BUFFER:
2350 		return "ISR";
2351 	case GUC_DPC_LOG_BUFFER:
2352 		return "DPC";
2353 	case GUC_CRASH_DUMP_LOG_BUFFER:
2354 		return "CRASH";
2355 	default:
2356 		MISSING_CASE(type);
2357 	}
2358 
2359 	return "";
2360 }
2361 
2362 static void i915_guc_log_info(struct seq_file *m,
2363 			      struct drm_i915_private *dev_priv)
2364 {
2365 	struct intel_guc_log *log = &dev_priv->guc.log;
2366 	enum guc_log_buffer_type type;
2367 
2368 	if (!intel_guc_log_relay_enabled(log)) {
2369 		seq_puts(m, "GuC log relay disabled\n");
2370 		return;
2371 	}
2372 
2373 	seq_puts(m, "GuC logging stats:\n");
2374 
2375 	seq_printf(m, "\tRelay full count: %u\n",
2376 		   log->relay.full_count);
2377 
2378 	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2379 		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2380 			   stringify_guc_log_type(type),
2381 			   log->stats[type].flush,
2382 			   log->stats[type].sampled_overflow);
2383 	}
2384 }
2385 
2386 static void i915_guc_client_info(struct seq_file *m,
2387 				 struct drm_i915_private *dev_priv,
2388 				 struct intel_guc_client *client)
2389 {
2390 	struct intel_engine_cs *engine;
2391 	enum intel_engine_id id;
2392 	u64 tot = 0;
2393 
2394 	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2395 		client->priority, client->stage_id, client->proc_desc_offset);
2396 	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2397 		client->doorbell_id, client->doorbell_offset);
2398 
2399 	for_each_engine(engine, dev_priv, id) {
2400 		u64 submissions = client->submissions[id];
2401 		tot += submissions;
2402 		seq_printf(m, "\tSubmissions: %llu %s\n",
2403 				submissions, engine->name);
2404 	}
2405 	seq_printf(m, "\tTotal: %llu\n", tot);
2406 }
2407 
2408 static int i915_guc_info(struct seq_file *m, void *data)
2409 {
2410 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2411 	const struct intel_guc *guc = &dev_priv->guc;
2412 
2413 	if (!USES_GUC(dev_priv))
2414 		return -ENODEV;
2415 
2416 	i915_guc_log_info(m, dev_priv);
2417 
2418 	if (!USES_GUC_SUBMISSION(dev_priv))
2419 		return 0;
2420 
2421 	GEM_BUG_ON(!guc->execbuf_client);
2422 
2423 	seq_puts(m, "\nDoorbell map:\n");
2424 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2425 	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2426 
2427 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2428 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2429 	if (guc->preempt_client) {
2430 		seq_printf(m, "\nGuC preempt client @ %p:\n",
2431 			   guc->preempt_client);
2432 		i915_guc_client_info(m, dev_priv, guc->preempt_client);
2433 	}
2434 
2435 	/* Add more as required ... */
2436 
2437 	return 0;
2438 }
2439 
2440 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2441 {
2442 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2443 	const struct intel_guc *guc = &dev_priv->guc;
2444 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2445 	struct intel_guc_client *client = guc->execbuf_client;
2446 	unsigned int tmp;
2447 	int index;
2448 
2449 	if (!USES_GUC_SUBMISSION(dev_priv))
2450 		return -ENODEV;
2451 
2452 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2453 		struct intel_engine_cs *engine;
2454 
2455 		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2456 			continue;
2457 
2458 		seq_printf(m, "GuC stage descriptor %u:\n", index);
2459 		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2460 		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2461 		seq_printf(m, "\tPriority: %d\n", desc->priority);
2462 		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2463 		seq_printf(m, "\tEngines used: 0x%x\n",
2464 			   desc->engines_used);
2465 		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2466 			   desc->db_trigger_phy,
2467 			   desc->db_trigger_cpu,
2468 			   desc->db_trigger_uk);
2469 		seq_printf(m, "\tProcess descriptor: 0x%x\n",
2470 			   desc->process_desc);
2471 		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2472 			   desc->wq_addr, desc->wq_size);
2473 		seq_putc(m, '\n');
2474 
2475 		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2476 			u32 guc_engine_id = engine->guc_id;
2477 			struct guc_execlist_context *lrc =
2478 						&desc->lrc[guc_engine_id];
2479 
2480 			seq_printf(m, "\t%s LRC:\n", engine->name);
2481 			seq_printf(m, "\t\tContext desc: 0x%x\n",
2482 				   lrc->context_desc);
2483 			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2484 			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2485 			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2486 			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2487 			seq_putc(m, '\n');
2488 		}
2489 	}
2490 
2491 	return 0;
2492 }
2493 
2494 static int i915_guc_log_dump(struct seq_file *m, void *data)
2495 {
2496 	struct drm_info_node *node = m->private;
2497 	struct drm_i915_private *dev_priv = node_to_i915(node);
2498 	bool dump_load_err = !!node->info_ent->data;
2499 	struct drm_i915_gem_object *obj = NULL;
2500 	u32 *log;
2501 	int i = 0;
2502 
2503 	if (!HAS_GUC(dev_priv))
2504 		return -ENODEV;
2505 
2506 	if (dump_load_err)
2507 		obj = dev_priv->guc.load_err_log;
2508 	else if (dev_priv->guc.log.vma)
2509 		obj = dev_priv->guc.log.vma->obj;
2510 
2511 	if (!obj)
2512 		return 0;
2513 
2514 	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2515 	if (IS_ERR(log)) {
2516 		DRM_DEBUG("Failed to pin object\n");
2517 		seq_puts(m, "(log data inaccessible)\n");
2518 		return PTR_ERR(log);
2519 	}
2520 
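	/* Dump the pinned log buffer as rows of four 32-bit words. */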
2521 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2522 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2523 			   *(log + i), *(log + i + 1),
2524 			   *(log + i + 2), *(log + i + 3));
2525 
2526 	seq_putc(m, '\n');
2527 
2528 	i915_gem_object_unpin_map(obj);
2529 
2530 	return 0;
2531 }
2532 
2533 static int i915_guc_log_level_get(void *data, u64 *val)
2534 {
2535 	struct drm_i915_private *dev_priv = data;
2536 
2537 	if (!USES_GUC(dev_priv))
2538 		return -ENODEV;
2539 
2540 	*val = intel_guc_log_get_level(&dev_priv->guc.log);
2541 
2542 	return 0;
2543 }
2544 
2545 static int i915_guc_log_level_set(void *data, u64 val)
2546 {
2547 	struct drm_i915_private *dev_priv = data;
2548 
2549 	if (!USES_GUC(dev_priv))
2550 		return -ENODEV;
2551 
2552 	return intel_guc_log_set_level(&dev_priv->guc.log, val);
2553 }
2554 
2555 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2556 			i915_guc_log_level_get, i915_guc_log_level_set,
2557 			"%lld\n");
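/*
 * Usage sketch from userspace (assuming this attribute is registered as
 * "i915_guc_log_level" under the card's debugfs directory):
 *
 *   cat /sys/kernel/debug/dri/0/i915_guc_log_level
 *   echo 2 > /sys/kernel/debug/dri/0/i915_guc_log_level
 */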
2558 
2559 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2560 {
2561 	struct drm_i915_private *dev_priv = inode->i_private;
2562 
2563 	if (!USES_GUC(dev_priv))
2564 		return -ENODEV;
2565 
2566 	file->private_data = &dev_priv->guc.log;
2567 
2568 	return intel_guc_log_relay_open(&dev_priv->guc.log);
2569 }
2570 
2571 static ssize_t
2572 i915_guc_log_relay_write(struct file *filp,
2573 			 const char __user *ubuf,
2574 			 size_t cnt,
2575 			 loff_t *ppos)
2576 {
2577 	struct intel_guc_log *log = filp->private_data;
2578 
2579 	intel_guc_log_relay_flush(log);
2580 
2581 	return cnt;
2582 }
2583 
2584 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2585 {
2586 	struct drm_i915_private *dev_priv = inode->i_private;
2587 
2588 	intel_guc_log_relay_close(&dev_priv->guc.log);
2589 
2590 	return 0;
2591 }
2592 
2593 static const struct file_operations i915_guc_log_relay_fops = {
2594 	.owner = THIS_MODULE,
2595 	.open = i915_guc_log_relay_open,
2596 	.write = i915_guc_log_relay_write,
2597 	.release = i915_guc_log_relay_release,
2598 };
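/*
 * Relay lifecycle, as wired up above: open enables the GuC log relay,
 * any write flushes pending log data to the relay channel, and the
 * final release tears the relay down again.
 */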
2599 
2600 static const char *psr2_live_status(u32 val)
2601 {
2602 	static const char * const live_status[] = {
2603 		"IDLE",
2604 		"CAPTURE",
2605 		"CAPTURE_FS",
2606 		"SLEEP",
2607 		"BUFON_FW",
2608 		"ML_UP",
2609 		"SU_STANDBY",
2610 		"FAST_SLEEP",
2611 		"DEEP_SLEEP",
2612 		"BUF_ON",
2613 		"TG_ON"
2614 	};
2615 
2616 	val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2617 	if (val < ARRAY_SIZE(live_status))
2618 		return live_status[val];
2619 
2620 	return "unknown";
2621 }
2622 
2623 static const char *psr_sink_status(u8 val)
2624 {
2625 	static const char * const sink_status[] = {
2626 		"inactive",
2627 		"transition to active, capture and display",
2628 		"active, display from RFB",
2629 		"active, capture and display on sink device timings",
2630 		"transition to inactive, capture and display, timing re-sync",
2631 		"reserved",
2632 		"reserved",
2633 		"sink internal error"
2634 	};
2635 
2636 	val &= DP_PSR_SINK_STATE_MASK;
2637 	if (val < ARRAY_SIZE(sink_status))
2638 		return sink_status[val];
2639 
2640 	return "unknown";
2641 }
2642 
2643 static int i915_edp_psr_status(struct seq_file *m, void *data)
2644 {
2645 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2646 	u32 psrperf = 0;
2647 	bool enabled = false;
2648 	bool sink_support;
2649 
2650 	if (!HAS_PSR(dev_priv))
2651 		return -ENODEV;
2652 
2653 	sink_support = dev_priv->psr.sink_support;
2654 	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
2655 	if (!sink_support)
2656 		return 0;
2657 
2658 	intel_runtime_pm_get(dev_priv);
2659 
2660 	mutex_lock(&dev_priv->psr.lock);
2661 	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2662 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2663 		   dev_priv->psr.busy_frontbuffer_bits);
2664 
2665 	if (dev_priv->psr.psr2_enabled)
2666 		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2667 	else
2668 		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2669 
2670 	seq_printf(m, "Main link in standby mode: %s\n",
2671 		   yesno(dev_priv->psr.link_standby));
2672 
2673 	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
2674 
2675 	/*
2676 	 * SKL+ perf counter is reset to 0 every time a DC state is entered
2677 	 */
2678 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2679 		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2680 			EDP_PSR_PERF_CNT_MASK;
2681 
2682 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2683 	}
2684 	if (dev_priv->psr.psr2_enabled) {
2685 		u32 psr2 = I915_READ(EDP_PSR2_STATUS);
2686 
2687 		seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
2688 			   psr2, psr2_live_status(psr2));
2689 	}
2690 
2691 	if (dev_priv->psr.enabled) {
2692 		struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
2693 		u8 val;
2694 
2695 		if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
2696 			seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
2697 				   psr_sink_status(val));
2698 	}
2699 	mutex_unlock(&dev_priv->psr.lock);
2700 
2701 	if (READ_ONCE(dev_priv->psr.debug)) {
2702 		seq_printf(m, "Last attempted entry at: %lld\n",
2703 			   dev_priv->psr.last_entry_attempt);
2704 		seq_printf(m, "Last exit at: %lld\n",
2705 			   dev_priv->psr.last_exit);
2706 	}
2707 
2708 	intel_runtime_pm_put(dev_priv);
2709 	return 0;
2710 }
2711 
2712 static int
2713 i915_edp_psr_debug_set(void *data, u64 val)
2714 {
2715 	struct drm_i915_private *dev_priv = data;
2716 
2717 	if (!CAN_PSR(dev_priv))
2718 		return -ENODEV;
2719 
2720 	DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
2721 
2722 	intel_runtime_pm_get(dev_priv);
2723 	intel_psr_irq_control(dev_priv, !!val);
2724 	intel_runtime_pm_put(dev_priv);
2725 
2726 	return 0;
2727 }
2728 
2729 static int
2730 i915_edp_psr_debug_get(void *data, u64 *val)
2731 {
2732 	struct drm_i915_private *dev_priv = data;
2733 
2734 	if (!CAN_PSR(dev_priv))
2735 		return -ENODEV;
2736 
2737 	*val = READ_ONCE(dev_priv->psr.debug);
2738 	return 0;
2739 }
2740 
2741 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2742 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2743 			"%llu\n");
2744 
2745 static int i915_sink_crc(struct seq_file *m, void *data)
2746 {
2747 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2748 	struct drm_device *dev = &dev_priv->drm;
2749 	struct intel_connector *connector;
2750 	struct drm_connector_list_iter conn_iter;
2751 	struct intel_dp *intel_dp = NULL;
2752 	struct drm_modeset_acquire_ctx ctx;
2753 	int ret;
2754 	u8 crc[6];
2755 
2756 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2757 
2758 	drm_connector_list_iter_begin(dev, &conn_iter);
2759 
2760 	for_each_intel_connector_iter(connector, &conn_iter) {
2761 		struct drm_crtc *crtc;
2762 		struct drm_connector_state *state;
2763 		struct intel_crtc_state *crtc_state;
2764 
2765 		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
2766 			continue;
2767 
2768 retry:
2769 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
2770 		if (ret)
2771 			goto err;
2772 
2773 		state = connector->base.state;
2774 		if (!state->best_encoder)
2775 			continue;
2776 
2777 		crtc = state->crtc;
2778 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2779 		if (ret)
2780 			goto err;
2781 
2782 		crtc_state = to_intel_crtc_state(crtc->state);
2783 		if (!crtc_state->base.active)
2784 			continue;
2785 
2786 		/*
2787 		 * We need to wait for all crtc updates to complete, to make
2788 		 * sure any pending modesets and plane updates are completed.
2789 		 */
2790 		if (crtc_state->base.commit) {
2791 			ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
2792 
2793 			if (ret)
2794 				goto err;
2795 		}
2796 
2797 		intel_dp = enc_to_intel_dp(state->best_encoder);
2798 
2799 		ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
2800 		if (ret)
2801 			goto err;
2802 
2803 		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2804 			   crc[0], crc[1], crc[2],
2805 			   crc[3], crc[4], crc[5]);
2806 		goto out;
2807 
2808 err:
2809 		if (ret == -EDEADLK) {
2810 			ret = drm_modeset_backoff(&ctx);
2811 			if (!ret)
2812 				goto retry;
2813 		}
2814 		goto out;
2815 	}
2816 	ret = -ENODEV;
2817 out:
2818 	drm_connector_list_iter_end(&conn_iter);
2819 	drm_modeset_drop_locks(&ctx);
2820 	drm_modeset_acquire_fini(&ctx);
2821 
2822 	return ret;
2823 }
2824 
2825 static int i915_energy_uJ(struct seq_file *m, void *data)
2826 {
2827 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2828 	unsigned long long power;
2829 	u32 units;
2830 
2831 	if (INTEL_GEN(dev_priv) < 6)
2832 		return -ENODEV;
2833 
2834 	intel_runtime_pm_get(dev_priv);
2835 
2836 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
2837 		intel_runtime_pm_put(dev_priv);
2838 		return -ENODEV;
2839 	}
2840 
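	/*
	 * Bits 12:8 of MSR_RAPL_POWER_UNIT hold the energy status unit as
	 * 1/2^ESU Joules, so scaling the counter by 10^6 before the shift
	 * below yields microjoules.
	 */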
2841 	units = (power & 0x1f00) >> 8;
2842 	power = I915_READ(MCH_SECP_NRG_STTS);
2843 	power = (1000000 * power) >> units; /* convert to uJ */
2844 
2845 	intel_runtime_pm_put(dev_priv);
2846 
2847 	seq_printf(m, "%llu", power);
2848 
2849 	return 0;
2850 }
2851 
2852 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2853 {
2854 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2855 	struct pci_dev *pdev = dev_priv->drm.pdev;
2856 
2857 	if (!HAS_RUNTIME_PM(dev_priv))
2858 		seq_puts(m, "Runtime power management not supported\n");
2859 
2860 	seq_printf(m, "GPU idle: %s (epoch %u)\n",
2861 		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2862 	seq_printf(m, "IRQs disabled: %s\n",
2863 		   yesno(!intel_irqs_enabled(dev_priv)));
2864 #ifdef CONFIG_PM
2865 	seq_printf(m, "Usage count: %d\n",
2866 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2867 #else
2868 	seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
2869 #endif
2870 	seq_printf(m, "PCI device power state: %s [%d]\n",
2871 		   pci_power_name(pdev->current_state),
2872 		   pdev->current_state);
2873 
2874 	return 0;
2875 }
2876 
2877 static int i915_power_domain_info(struct seq_file *m, void *unused)
2878 {
2879 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2880 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2881 	int i;
2882 
2883 	mutex_lock(&power_domains->lock);
2884 
2885 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2886 	for (i = 0; i < power_domains->power_well_count; i++) {
2887 		struct i915_power_well *power_well;
2888 		enum intel_display_power_domain power_domain;
2889 
2890 		power_well = &power_domains->power_wells[i];
2891 		seq_printf(m, "%-25s %d\n", power_well->name,
2892 			   power_well->count);
2893 
2894 		for_each_power_domain(power_domain, power_well->domains)
2895 			seq_printf(m, "  %-23s %d\n",
2896 				 intel_display_power_domain_str(power_domain),
2897 				 power_domains->domain_use_count[power_domain]);
2898 	}
2899 
2900 	mutex_unlock(&power_domains->lock);
2901 
2902 	return 0;
2903 }
2904 
2905 static int i915_dmc_info(struct seq_file *m, void *unused)
2906 {
2907 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2908 	struct intel_csr *csr;
2909 
2910 	if (!HAS_CSR(dev_priv))
2911 		return -ENODEV;
2912 
2913 	csr = &dev_priv->csr;
2914 
2915 	intel_runtime_pm_get(dev_priv);
2916 
2917 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2918 	seq_printf(m, "path: %s\n", csr->fw_path);
2919 
2920 	if (!csr->dmc_payload)
2921 		goto out;
2922 
2923 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2924 		   CSR_VERSION_MINOR(csr->version));
2925 
2926 	if (IS_KABYLAKE(dev_priv) ||
2927 	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
2928 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2929 			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
2930 		seq_printf(m, "DC5 -> DC6 count: %d\n",
2931 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2932 	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
2933 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2934 			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
2935 	}
2936 
2937 out:
2938 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2939 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2940 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2941 
2942 	intel_runtime_pm_put(dev_priv);
2943 
2944 	return 0;
2945 }
2946 
2947 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2948 				 struct drm_display_mode *mode)
2949 {
2950 	int i;
2951 
2952 	for (i = 0; i < tabs; i++)
2953 		seq_putc(m, '\t');
2954 
2955 	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2956 		   mode->base.id, mode->name,
2957 		   mode->vrefresh, mode->clock,
2958 		   mode->hdisplay, mode->hsync_start,
2959 		   mode->hsync_end, mode->htotal,
2960 		   mode->vdisplay, mode->vsync_start,
2961 		   mode->vsync_end, mode->vtotal,
2962 		   mode->type, mode->flags);
2963 }
2964 
2965 static void intel_encoder_info(struct seq_file *m,
2966 			       struct intel_crtc *intel_crtc,
2967 			       struct intel_encoder *intel_encoder)
2968 {
2969 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2970 	struct drm_device *dev = &dev_priv->drm;
2971 	struct drm_crtc *crtc = &intel_crtc->base;
2972 	struct intel_connector *intel_connector;
2973 	struct drm_encoder *encoder;
2974 
2975 	encoder = &intel_encoder->base;
2976 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2977 		   encoder->base.id, encoder->name);
2978 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2979 		struct drm_connector *connector = &intel_connector->base;
2980 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2981 			   connector->base.id,
2982 			   connector->name,
2983 			   drm_get_connector_status_name(connector->status));
2984 		if (connector->status == connector_status_connected) {
2985 			struct drm_display_mode *mode = &crtc->mode;
2986 			seq_puts(m, ", mode:\n");
2987 			intel_seq_print_mode(m, 2, mode);
2988 		} else {
2989 			seq_putc(m, '\n');
2990 		}
2991 	}
2992 }
2993 
2994 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2995 {
2996 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2997 	struct drm_device *dev = &dev_priv->drm;
2998 	struct drm_crtc *crtc = &intel_crtc->base;
2999 	struct intel_encoder *intel_encoder;
3000 	struct drm_plane_state *plane_state = crtc->primary->state;
3001 	struct drm_framebuffer *fb = plane_state->fb;
3002 
3003 	if (fb)
3004 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
3005 			   fb->base.id, plane_state->src_x >> 16,
3006 			   plane_state->src_y >> 16, fb->width, fb->height);
3007 	else
3008 		seq_puts(m, "\tprimary plane disabled\n");
3009 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3010 		intel_encoder_info(m, intel_crtc, intel_encoder);
3011 }
3012 
3013 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3014 {
3015 	struct drm_display_mode *mode = panel->fixed_mode;
3016 
3017 	seq_puts(m, "\tfixed mode:\n");
3018 	intel_seq_print_mode(m, 2, mode);
3019 }
3020 
3021 static void intel_dp_info(struct seq_file *m,
3022 			  struct intel_connector *intel_connector)
3023 {
3024 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3025 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3026 
3027 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
3028 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
3029 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
3030 		intel_panel_info(m, &intel_connector->panel);
3031 
3032 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
3033 				&intel_dp->aux);
3034 }
3035 
3036 static void intel_dp_mst_info(struct seq_file *m,
3037 			  struct intel_connector *intel_connector)
3038 {
3039 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3040 	struct intel_dp_mst_encoder *intel_mst =
3041 		enc_to_mst(&intel_encoder->base);
3042 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
3043 	struct intel_dp *intel_dp = &intel_dig_port->dp;
3044 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3045 					intel_connector->port);
3046 
3047 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3048 }
3049 
3050 static void intel_hdmi_info(struct seq_file *m,
3051 			    struct intel_connector *intel_connector)
3052 {
3053 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3054 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3055 
3056 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
3057 }
3058 
3059 static void intel_lvds_info(struct seq_file *m,
3060 			    struct intel_connector *intel_connector)
3061 {
3062 	intel_panel_info(m, &intel_connector->panel);
3063 }
3064 
3065 static void intel_connector_info(struct seq_file *m,
3066 				 struct drm_connector *connector)
3067 {
3068 	struct intel_connector *intel_connector = to_intel_connector(connector);
3069 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3070 	struct drm_display_mode *mode;
3071 
3072 	seq_printf(m, "connector %d: type %s, status: %s\n",
3073 		   connector->base.id, connector->name,
3074 		   drm_get_connector_status_name(connector->status));
3075 	if (connector->status == connector_status_connected) {
3076 		seq_printf(m, "\tname: %s\n", connector->display_info.name);
3077 		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3078 			   connector->display_info.width_mm,
3079 			   connector->display_info.height_mm);
3080 		seq_printf(m, "\tsubpixel order: %s\n",
3081 			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3082 		seq_printf(m, "\tCEA rev: %d\n",
3083 			   connector->display_info.cea_rev);
3084 	}
3085 
3086 	if (!intel_encoder)
3087 		return;
3088 
3089 	switch (connector->connector_type) {
3090 	case DRM_MODE_CONNECTOR_DisplayPort:
3091 	case DRM_MODE_CONNECTOR_eDP:
3092 		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3093 			intel_dp_mst_info(m, intel_connector);
3094 		else
3095 			intel_dp_info(m, intel_connector);
3096 		break;
3097 	case DRM_MODE_CONNECTOR_LVDS:
3098 		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3099 			intel_lvds_info(m, intel_connector);
3100 		break;
3101 	case DRM_MODE_CONNECTOR_HDMIA:
3102 		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3103 		    intel_encoder->type == INTEL_OUTPUT_DDI)
3104 			intel_hdmi_info(m, intel_connector);
3105 		break;
3106 	default:
3107 		break;
3108 	}
3109 
3110 	seq_puts(m, "\tmodes:\n");
3111 	list_for_each_entry(mode, &connector->modes, head)
3112 		intel_seq_print_mode(m, 2, mode);
3113 }
3114 
3115 static const char *plane_type(enum drm_plane_type type)
3116 {
3117 	switch (type) {
3118 	case DRM_PLANE_TYPE_OVERLAY:
3119 		return "OVL";
3120 	case DRM_PLANE_TYPE_PRIMARY:
3121 		return "PRI";
3122 	case DRM_PLANE_TYPE_CURSOR:
3123 		return "CUR";
3124 	/*
3125 	 * Deliberately omitting default: to generate compiler warnings
3126 	 * when a new drm_plane_type gets added.
3127 	 */
3128 	}
3129 
3130 	return "unknown";
3131 }
3132 
3133 static const char *plane_rotation(unsigned int rotation)
3134 {
3135 	static char buf[48];
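	/*
	 * Note: this returns a static buffer, so the helper is not
	 * reentrant; tolerable for single-threaded debugfs output only.
	 */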
3136 	/*
3137 	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
3138 	 * but print them all so that misused values are visible.
3139 	 */
3140 	snprintf(buf, sizeof(buf),
3141 		 "%s%s%s%s%s%s(0x%08x)",
3142 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
3143 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3144 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3145 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3146 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3147 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3148 		 rotation);
3149 
3150 	return buf;
3151 }
3152 
3153 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3154 {
3155 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3156 	struct drm_device *dev = &dev_priv->drm;
3157 	struct intel_plane *intel_plane;
3158 
3159 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3160 		struct drm_plane_state *state;
3161 		struct drm_plane *plane = &intel_plane->base;
3162 		struct drm_format_name_buf format_name;
3163 
3164 		if (!plane->state) {
3165 			seq_puts(m, "plane->state is NULL!\n");
3166 			continue;
3167 		}
3168 
3169 		state = plane->state;
3170 
3171 		if (state->fb) {
3172 			drm_get_format_name(state->fb->format->format,
3173 					    &format_name);
3174 		} else {
3175 			sprintf(format_name.str, "N/A");
3176 		}
3177 
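		/*
		 * src_* coordinates are 16.16 fixed point;
		 * (frac * 15625) >> 10 converts the 16-bit fraction into
		 * millionths, since 15625/1024 == 10^6/65536.
		 */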
3178 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3179 			   plane->base.id,
3180 			   plane_type(intel_plane->base.type),
3181 			   state->crtc_x, state->crtc_y,
3182 			   state->crtc_w, state->crtc_h,
3183 			   (state->src_x >> 16),
3184 			   ((state->src_x & 0xffff) * 15625) >> 10,
3185 			   (state->src_y >> 16),
3186 			   ((state->src_y & 0xffff) * 15625) >> 10,
3187 			   (state->src_w >> 16),
3188 			   ((state->src_w & 0xffff) * 15625) >> 10,
3189 			   (state->src_h >> 16),
3190 			   ((state->src_h & 0xffff) * 15625) >> 10,
3191 			   format_name.str,
3192 			   plane_rotation(state->rotation));
3193 	}
3194 }
3195 
3196 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3197 {
3198 	struct intel_crtc_state *pipe_config;
3199 	int num_scalers = intel_crtc->num_scalers;
3200 	int i;
3201 
3202 	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3203 
3204 	/* Not all platforms have a scaler */
3205 	if (num_scalers) {
3206 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3207 			   num_scalers,
3208 			   pipe_config->scaler_state.scaler_users,
3209 			   pipe_config->scaler_state.scaler_id);
3210 
3211 		for (i = 0; i < num_scalers; i++) {
3212 			struct intel_scaler *sc =
3213 					&pipe_config->scaler_state.scalers[i];
3214 
3215 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3216 				   i, yesno(sc->in_use), sc->mode);
3217 		}
3218 		seq_puts(m, "\n");
3219 	} else {
3220 		seq_puts(m, "\tNo scalers available on this platform\n");
3221 	}
3222 }
3223 
3224 static int i915_display_info(struct seq_file *m, void *unused)
3225 {
3226 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3227 	struct drm_device *dev = &dev_priv->drm;
3228 	struct intel_crtc *crtc;
3229 	struct drm_connector *connector;
3230 	struct drm_connector_list_iter conn_iter;
3231 
3232 	intel_runtime_pm_get(dev_priv);
3233 	seq_puts(m, "CRTC info\n");
3234 	seq_puts(m, "---------\n");
3235 	for_each_intel_crtc(dev, crtc) {
3236 		struct intel_crtc_state *pipe_config;
3237 
3238 		drm_modeset_lock(&crtc->base.mutex, NULL);
3239 		pipe_config = to_intel_crtc_state(crtc->base.state);
3240 
3241 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3242 			   crtc->base.base.id, pipe_name(crtc->pipe),
3243 			   yesno(pipe_config->base.active),
3244 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3245 			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3246 
3247 		if (pipe_config->base.active) {
3248 			struct intel_plane *cursor =
3249 				to_intel_plane(crtc->base.cursor);
3250 
3251 			intel_crtc_info(m, crtc);
3252 
3253 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3254 				   yesno(cursor->base.state->visible),
3255 				   cursor->base.state->crtc_x,
3256 				   cursor->base.state->crtc_y,
3257 				   cursor->base.state->crtc_w,
3258 				   cursor->base.state->crtc_h,
3259 				   cursor->cursor.base);
3260 			intel_scaler_info(m, crtc);
3261 			intel_plane_info(m, crtc);
3262 		}
3263 
3264 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
3265 			   yesno(!crtc->cpu_fifo_underrun_disabled),
3266 			   yesno(!crtc->pch_fifo_underrun_disabled));
3267 		drm_modeset_unlock(&crtc->base.mutex);
3268 	}
3269 
3270 	seq_putc(m, '\n');
3271 	seq_puts(m, "Connector info\n");
3272 	seq_puts(m, "--------------\n");
3273 	mutex_lock(&dev->mode_config.mutex);
3274 	drm_connector_list_iter_begin(dev, &conn_iter);
3275 	drm_for_each_connector_iter(connector, &conn_iter)
3276 		intel_connector_info(m, connector);
3277 	drm_connector_list_iter_end(&conn_iter);
3278 	mutex_unlock(&dev->mode_config.mutex);
3279 
3280 	intel_runtime_pm_put(dev_priv);
3281 
3282 	return 0;
3283 }
3284 
3285 static int i915_engine_info(struct seq_file *m, void *unused)
3286 {
3287 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3288 	struct intel_engine_cs *engine;
3289 	enum intel_engine_id id;
3290 	struct drm_printer p;
3291 
3292 	intel_runtime_pm_get(dev_priv);
3293 
3294 	seq_printf(m, "GT awake? %s (epoch %u)\n",
3295 		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3296 	seq_printf(m, "Global active requests: %d\n",
3297 		   dev_priv->gt.active_requests);
3298 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
3299 		   dev_priv->info.cs_timestamp_frequency_khz);
3300 
3301 	p = drm_seq_file_printer(m);
3302 	for_each_engine(engine, dev_priv, id)
3303 		intel_engine_dump(engine, &p, "%s\n", engine->name);
3304 
3305 	intel_runtime_pm_put(dev_priv);
3306 
3307 	return 0;
3308 }
3309 
3310 static int i915_rcs_topology(struct seq_file *m, void *unused)
3311 {
3312 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3313 	struct drm_printer p = drm_seq_file_printer(m);
3314 
3315 	intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3316 
3317 	return 0;
3318 }
3319 
3320 static int i915_shrinker_info(struct seq_file *m, void *unused)
3321 {
3322 	struct drm_i915_private *i915 = node_to_i915(m->private);
3323 
3324 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3325 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3326 
3327 	return 0;
3328 }
3329 
3330 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3331 {
3332 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3333 	struct drm_device *dev = &dev_priv->drm;
3334 	int i;
3335 
3336 	drm_modeset_lock_all(dev);
3337 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3338 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3339 
3340 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3341 			   pll->info->id);
3342 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3343 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3344 		seq_puts(m, " tracked hardware state:\n");
3345 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3346 		seq_printf(m, " dpll_md: 0x%08x\n",
3347 			   pll->state.hw_state.dpll_md);
3348 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3349 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3350 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3351 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3352 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3353 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3354 			   pll->state.hw_state.mg_refclkin_ctl);
3355 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3356 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
3357 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3358 			   pll->state.hw_state.mg_clktop2_hsclkctl);
3359 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
3360 			   pll->state.hw_state.mg_pll_div0);
3361 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
3362 			   pll->state.hw_state.mg_pll_div1);
3363 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
3364 			   pll->state.hw_state.mg_pll_lf);
3365 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3366 			   pll->state.hw_state.mg_pll_frac_lock);
3367 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3368 			   pll->state.hw_state.mg_pll_ssc);
3369 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
3370 			   pll->state.hw_state.mg_pll_bias);
3371 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3372 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
3373 	}
3374 	drm_modeset_unlock_all(dev);
3375 
3376 	return 0;
3377 }
3378 
3379 static int i915_wa_registers(struct seq_file *m, void *unused)
3380 {
3381 	struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
3382 	int i;
3383 
3384 	seq_printf(m, "Workarounds applied: %d\n", wa->count);
3385 	for (i = 0; i < wa->count; ++i)
3386 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3387 			   wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
3388 
3389 	return 0;
3390 }
3391 
3392 static int i915_ipc_status_show(struct seq_file *m, void *data)
3393 {
3394 	struct drm_i915_private *dev_priv = m->private;
3395 
3396 	seq_printf(m, "Isochronous Priority Control: %s\n",
3397 			yesno(dev_priv->ipc_enabled));
3398 	return 0;
3399 }
3400 
3401 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3402 {
3403 	struct drm_i915_private *dev_priv = inode->i_private;
3404 
3405 	if (!HAS_IPC(dev_priv))
3406 		return -ENODEV;
3407 
3408 	return single_open(file, i915_ipc_status_show, dev_priv);
3409 }
3410 
3411 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3412 				     size_t len, loff_t *offp)
3413 {
3414 	struct seq_file *m = file->private_data;
3415 	struct drm_i915_private *dev_priv = m->private;
3416 	int ret;
3417 	bool enable;
3418 
3419 	ret = kstrtobool_from_user(ubuf, len, &enable);
3420 	if (ret < 0)
3421 		return ret;
3422 
3423 	intel_runtime_pm_get(dev_priv);
3424 	if (!dev_priv->ipc_enabled && enable)
3425 		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
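	/*
	 * Changing IPC affects watermark programming, so force a full
	 * watermark recomputation on the next atomic commit.
	 */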
3426 	dev_priv->wm.distrust_bios_wm = true;
3427 	dev_priv->ipc_enabled = enable;
3428 	intel_enable_ipc(dev_priv);
3429 	intel_runtime_pm_put(dev_priv);
3430 
3431 	return len;
3432 }
3433 
3434 static const struct file_operations i915_ipc_status_fops = {
3435 	.owner = THIS_MODULE,
3436 	.open = i915_ipc_status_open,
3437 	.read = seq_read,
3438 	.llseek = seq_lseek,
3439 	.release = single_release,
3440 	.write = i915_ipc_status_write
3441 };
3442 
3443 static int i915_ddb_info(struct seq_file *m, void *unused)
3444 {
3445 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3446 	struct drm_device *dev = &dev_priv->drm;
3447 	struct skl_ddb_allocation *ddb;
3448 	struct skl_ddb_entry *entry;
3449 	enum pipe pipe;
3450 	int plane;
3451 
3452 	if (INTEL_GEN(dev_priv) < 9)
3453 		return -ENODEV;
3454 
3455 	drm_modeset_lock_all(dev);
3456 
3457 	ddb = &dev_priv->wm.skl_hw.ddb;
3458 
3459 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3460 
3461 	for_each_pipe(dev_priv, pipe) {
3462 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3463 
3464 		for_each_universal_plane(dev_priv, pipe, plane) {
3465 			entry = &ddb->plane[pipe][plane];
3466 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3467 				   entry->start, entry->end,
3468 				   skl_ddb_entry_size(entry));
3469 		}
3470 
3471 		entry = &ddb->plane[pipe][PLANE_CURSOR];
3472 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3473 			   entry->end, skl_ddb_entry_size(entry));
3474 	}
3475 
3476 	drm_modeset_unlock_all(dev);
3477 
3478 	return 0;
3479 }
3480 
3481 static void drrs_status_per_crtc(struct seq_file *m,
3482 				 struct drm_device *dev,
3483 				 struct intel_crtc *intel_crtc)
3484 {
3485 	struct drm_i915_private *dev_priv = to_i915(dev);
3486 	struct i915_drrs *drrs = &dev_priv->drrs;
3487 	int vrefresh = 0;
3488 	struct drm_connector *connector;
3489 	struct drm_connector_list_iter conn_iter;
3490 
3491 	drm_connector_list_iter_begin(dev, &conn_iter);
3492 	drm_for_each_connector_iter(connector, &conn_iter) {
3493 		if (connector->state->crtc != &intel_crtc->base)
3494 			continue;
3495 
3496 		seq_printf(m, "%s:\n", connector->name);
3497 	}
3498 	drm_connector_list_iter_end(&conn_iter);
3499 
3500 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3501 		seq_puts(m, "\tVBT: DRRS_type: Static");
3502 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3503 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3504 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3505 		seq_puts(m, "\tVBT: DRRS_type: None");
3506 	else
3507 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3508 
3509 	seq_puts(m, "\n\n");
3510 
3511 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3512 		struct intel_panel *panel;
3513 
3514 		mutex_lock(&drrs->mutex);
3515 		/* DRRS Supported */
3516 		seq_puts(m, "\tDRRS Supported: Yes\n");
3517 
3518 		/* disable_drrs() will make drrs->dp NULL */
3519 		if (!drrs->dp) {
3520 			seq_puts(m, "Idleness DRRS: Disabled\n");
3521 			if (dev_priv->psr.enabled)
3522 				seq_puts(m,
3523 				"\tAs PSR is enabled, DRRS is not enabled\n");
3524 			mutex_unlock(&drrs->mutex);
3525 			return;
3526 		}
3527 
3528 		panel = &drrs->dp->attached_connector->panel;
3529 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3530 					drrs->busy_frontbuffer_bits);
3531 
3532 		seq_puts(m, "\n\t\t");
3533 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3534 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3535 			vrefresh = panel->fixed_mode->vrefresh;
3536 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3537 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3538 			vrefresh = panel->downclock_mode->vrefresh;
3539 		} else {
3540 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3541 						drrs->refresh_rate_type);
3542 			mutex_unlock(&drrs->mutex);
3543 			return;
3544 		}
3545 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3546 
3547 		seq_puts(m, "\n\t\t");
3548 		mutex_unlock(&drrs->mutex);
3549 	} else {
3550 		/* DRRS not supported. Print the VBT parameter */
3551 		seq_puts(m, "\tDRRS Supported: No");
3552 	}
3553 	seq_puts(m, "\n");
3554 }
3555 
3556 static int i915_drrs_status(struct seq_file *m, void *unused)
3557 {
3558 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3559 	struct drm_device *dev = &dev_priv->drm;
3560 	struct intel_crtc *intel_crtc;
3561 	int active_crtc_cnt = 0;
3562 
3563 	drm_modeset_lock_all(dev);
3564 	for_each_intel_crtc(dev, intel_crtc) {
3565 		if (intel_crtc->base.state->active) {
3566 			active_crtc_cnt++;
3567 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3568 
3569 			drrs_status_per_crtc(m, dev, intel_crtc);
3570 		}
3571 	}
3572 	drm_modeset_unlock_all(dev);
3573 
3574 	if (!active_crtc_cnt)
3575 		seq_puts(m, "No active crtc found\n");
3576 
3577 	return 0;
3578 }
3579 
3580 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3581 {
3582 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3583 	struct drm_device *dev = &dev_priv->drm;
3584 	struct intel_encoder *intel_encoder;
3585 	struct intel_digital_port *intel_dig_port;
3586 	struct drm_connector *connector;
3587 	struct drm_connector_list_iter conn_iter;
3588 
3589 	drm_connector_list_iter_begin(dev, &conn_iter);
3590 	drm_for_each_connector_iter(connector, &conn_iter) {
3591 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3592 			continue;
3593 
3594 		intel_encoder = intel_attached_encoder(connector);
3595 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3596 			continue;
3597 
3598 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3599 		if (!intel_dig_port->dp.can_mst)
3600 			continue;
3601 
3602 		seq_printf(m, "MST Source Port %c\n",
3603 			   port_name(intel_dig_port->base.port));
3604 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3605 	}
3606 	drm_connector_list_iter_end(&conn_iter);
3607 
3608 	return 0;
3609 }
3610 
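/*
 * DP compliance hook: writing "1" arms the compliance test handling on
 * every connected (non-MST) DP connector; any other integer disarms it.
 * Example, assuming the usual debugfs layout:
 *   echo 1 > /sys/kernel/debug/dri/0/i915_dp_test_active
 */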
3611 static ssize_t i915_displayport_test_active_write(struct file *file,
3612 						  const char __user *ubuf,
3613 						  size_t len, loff_t *offp)
3614 {
3615 	char *input_buffer;
3616 	int status = 0;
3617 	struct drm_device *dev;
3618 	struct drm_connector *connector;
3619 	struct drm_connector_list_iter conn_iter;
3620 	struct intel_dp *intel_dp;
3621 	int val = 0;
3622 
3623 	dev = ((struct seq_file *)file->private_data)->private;
3624 
3625 	if (len == 0)
3626 		return 0;
3627 
3628 	input_buffer = memdup_user_nul(ubuf, len);
3629 	if (IS_ERR(input_buffer))
3630 		return PTR_ERR(input_buffer);
3631 
3632 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3633 
3634 	drm_connector_list_iter_begin(dev, &conn_iter);
3635 	drm_for_each_connector_iter(connector, &conn_iter) {
3636 		struct intel_encoder *encoder;
3637 
3638 		if (connector->connector_type !=
3639 		    DRM_MODE_CONNECTOR_DisplayPort)
3640 			continue;
3641 
3642 		encoder = to_intel_encoder(connector->encoder);
3643 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3644 			continue;
3645 
3646 		if (encoder && connector->status == connector_status_connected) {
3647 			intel_dp = enc_to_intel_dp(&encoder->base);
3648 			status = kstrtoint(input_buffer, 10, &val);
3649 			if (status < 0)
3650 				break;
3651 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3652 			/* To prevent erroneous activation of the compliance
3653 			 * testing code, only accept an actual value of 1 here
3654 			 */
3655 			if (val == 1)
3656 				intel_dp->compliance.test_active = 1;
3657 			else
3658 				intel_dp->compliance.test_active = 0;
3659 		}
3660 	}
3661 	drm_connector_list_iter_end(&conn_iter);
3662 	kfree(input_buffer);
3663 	if (status < 0)
3664 		return status;
3665 
3666 	*offp += len;
3667 	return len;
3668 }
3669 
3670 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3671 {
3672 	struct drm_i915_private *dev_priv = m->private;
3673 	struct drm_device *dev = &dev_priv->drm;
3674 	struct drm_connector *connector;
3675 	struct drm_connector_list_iter conn_iter;
3676 	struct intel_dp *intel_dp;
3677 
3678 	drm_connector_list_iter_begin(dev, &conn_iter);
3679 	drm_for_each_connector_iter(connector, &conn_iter) {
3680 		struct intel_encoder *encoder;
3681 
3682 		if (connector->connector_type !=
3683 		    DRM_MODE_CONNECTOR_DisplayPort)
3684 			continue;
3685 
3686 		encoder = to_intel_encoder(connector->encoder);
3687 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3688 			continue;
3689 
3690 		if (encoder && connector->status == connector_status_connected) {
3691 			intel_dp = enc_to_intel_dp(&encoder->base);
3692 			if (intel_dp->compliance.test_active)
3693 				seq_puts(m, "1");
3694 			else
3695 				seq_puts(m, "0");
3696 		} else
3697 			seq_puts(m, "0");
3698 	}
3699 	drm_connector_list_iter_end(&conn_iter);
3700 
3701 	return 0;
3702 }
3703 
3704 static int i915_displayport_test_active_open(struct inode *inode,
3705 					     struct file *file)
3706 {
3707 	return single_open(file, i915_displayport_test_active_show,
3708 			   inode->i_private);
3709 }
3710 
3711 static const struct file_operations i915_displayport_test_active_fops = {
3712 	.owner = THIS_MODULE,
3713 	.open = i915_displayport_test_active_open,
3714 	.read = seq_read,
3715 	.llseek = seq_lseek,
3716 	.release = single_release,
3717 	.write = i915_displayport_test_active_write
3718 };
3719 
3720 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3721 {
3722 	struct drm_i915_private *dev_priv = m->private;
3723 	struct drm_device *dev = &dev_priv->drm;
3724 	struct drm_connector *connector;
3725 	struct drm_connector_list_iter conn_iter;
3726 	struct intel_dp *intel_dp;
3727 
3728 	drm_connector_list_iter_begin(dev, &conn_iter);
3729 	drm_for_each_connector_iter(connector, &conn_iter) {
3730 		struct intel_encoder *encoder;
3731 
3732 		if (connector->connector_type !=
3733 		    DRM_MODE_CONNECTOR_DisplayPort)
3734 			continue;
3735 
3736 		encoder = to_intel_encoder(connector->encoder);
3737 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3738 			continue;
3739 
3740 		if (encoder && connector->status == connector_status_connected) {
3741 			intel_dp = enc_to_intel_dp(&encoder->base);
3742 			if (intel_dp->compliance.test_type ==
3743 			    DP_TEST_LINK_EDID_READ)
3744 				seq_printf(m, "%lx",
3745 					   intel_dp->compliance.test_data.edid);
3746 			else if (intel_dp->compliance.test_type ==
3747 				 DP_TEST_LINK_VIDEO_PATTERN) {
3748 				seq_printf(m, "hdisplay: %d\n",
3749 					   intel_dp->compliance.test_data.hdisplay);
3750 				seq_printf(m, "vdisplay: %d\n",
3751 					   intel_dp->compliance.test_data.vdisplay);
3752 				seq_printf(m, "bpc: %u\n",
3753 					   intel_dp->compliance.test_data.bpc);
3754 			}
3755 		} else
3756 			seq_puts(m, "0");
3757 	}
3758 	drm_connector_list_iter_end(&conn_iter);
3759 
3760 	return 0;
3761 }
3762 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3763 
3764 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3765 {
3766 	struct drm_i915_private *dev_priv = m->private;
3767 	struct drm_device *dev = &dev_priv->drm;
3768 	struct drm_connector *connector;
3769 	struct drm_connector_list_iter conn_iter;
3770 	struct intel_dp *intel_dp;
3771 
3772 	drm_connector_list_iter_begin(dev, &conn_iter);
3773 	drm_for_each_connector_iter(connector, &conn_iter) {
3774 		struct intel_encoder *encoder;
3775 
3776 		if (connector->connector_type !=
3777 		    DRM_MODE_CONNECTOR_DisplayPort)
3778 			continue;
3779 
3780 		encoder = to_intel_encoder(connector->encoder);
3781 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3782 			continue;
3783 
3784 		if (encoder && connector->status == connector_status_connected) {
3785 			intel_dp = enc_to_intel_dp(&encoder->base);
3786 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3787 		} else
3788 			seq_puts(m, "0");
3789 	}
3790 	drm_connector_list_iter_end(&conn_iter);
3791 
3792 	return 0;
3793 }
3794 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3795 
3796 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3797 {
3798 	struct drm_i915_private *dev_priv = m->private;
3799 	struct drm_device *dev = &dev_priv->drm;
3800 	int level;
3801 	int num_levels;
3802 
3803 	if (IS_CHERRYVIEW(dev_priv))
3804 		num_levels = 3;
3805 	else if (IS_VALLEYVIEW(dev_priv))
3806 		num_levels = 1;
3807 	else if (IS_G4X(dev_priv))
3808 		num_levels = 3;
3809 	else
3810 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3811 
3812 	drm_modeset_lock_all(dev);
3813 
3814 	for (level = 0; level < num_levels; level++) {
3815 		unsigned int latency = wm[level];
3816 
3817 		/*
3818 		 * - WM1+ latency values in 0.5us units
3819 		 * - latencies are in us on gen9/vlv/chv/g4x
3820 		 */
3821 		if (INTEL_GEN(dev_priv) >= 9 ||
3822 		    IS_VALLEYVIEW(dev_priv) ||
3823 		    IS_CHERRYVIEW(dev_priv) ||
3824 		    IS_G4X(dev_priv))
3825 			latency *= 10;
3826 		else if (level > 0)
3827 			latency *= 5;
3828 
3829 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3830 			   level, wm[level], latency / 10, latency % 10);
3831 	}
3832 
3833 	drm_modeset_unlock_all(dev);
3834 }
3835 
3836 static int pri_wm_latency_show(struct seq_file *m, void *data)
3837 {
3838 	struct drm_i915_private *dev_priv = m->private;
3839 	const uint16_t *latencies;
3840 
3841 	if (INTEL_GEN(dev_priv) >= 9)
3842 		latencies = dev_priv->wm.skl_latency;
3843 	else
3844 		latencies = dev_priv->wm.pri_latency;
3845 
3846 	wm_latency_show(m, latencies);
3847 
3848 	return 0;
3849 }
3850 
3851 static int spr_wm_latency_show(struct seq_file *m, void *data)
3852 {
3853 	struct drm_i915_private *dev_priv = m->private;
3854 	const uint16_t *latencies;
3855 
3856 	if (INTEL_GEN(dev_priv) >= 9)
3857 		latencies = dev_priv->wm.skl_latency;
3858 	else
3859 		latencies = dev_priv->wm.spr_latency;
3860 
3861 	wm_latency_show(m, latencies);
3862 
3863 	return 0;
3864 }
3865 
3866 static int cur_wm_latency_show(struct seq_file *m, void *data)
3867 {
3868 	struct drm_i915_private *dev_priv = m->private;
3869 	const uint16_t *latencies;
3870 
3871 	if (INTEL_GEN(dev_priv) >= 9)
3872 		latencies = dev_priv->wm.skl_latency;
3873 	else
3874 		latencies = dev_priv->wm.cur_latency;
3875 
3876 	wm_latency_show(m, latencies);
3877 
3878 	return 0;
3879 }
3880 
3881 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3882 {
3883 	struct drm_i915_private *dev_priv = inode->i_private;
3884 
3885 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3886 		return -ENODEV;
3887 
3888 	return single_open(file, pri_wm_latency_show, dev_priv);
3889 }
3890 
3891 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3892 {
3893 	struct drm_i915_private *dev_priv = inode->i_private;
3894 
3895 	if (HAS_GMCH_DISPLAY(dev_priv))
3896 		return -ENODEV;
3897 
3898 	return single_open(file, spr_wm_latency_show, dev_priv);
3899 }
3900 
3901 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3902 {
3903 	struct drm_i915_private *dev_priv = inode->i_private;
3904 
3905 	if (HAS_GMCH_DISPLAY(dev_priv))
3906 		return -ENODEV;
3907 
3908 	return single_open(file, cur_wm_latency_show, dev_priv);
3909 }
3910 
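/*
 * Parses up to eight space-separated watermark latencies; exactly
 * num_levels values must be supplied for the current platform. Example
 * for an eight-level platform (path assumed):
 *   echo "2 4 10 10 10 10 10 10" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */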
3911 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3912 				size_t len, loff_t *offp, uint16_t wm[8])
3913 {
3914 	struct seq_file *m = file->private_data;
3915 	struct drm_i915_private *dev_priv = m->private;
3916 	struct drm_device *dev = &dev_priv->drm;
3917 	uint16_t new[8] = { 0 };
3918 	int num_levels;
3919 	int level;
3920 	int ret;
3921 	char tmp[32];
3922 
3923 	if (IS_CHERRYVIEW(dev_priv))
3924 		num_levels = 3;
3925 	else if (IS_VALLEYVIEW(dev_priv))
3926 		num_levels = 1;
3927 	else if (IS_G4X(dev_priv))
3928 		num_levels = 3;
3929 	else
3930 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3931 
3932 	if (len >= sizeof(tmp))
3933 		return -EINVAL;
3934 
3935 	if (copy_from_user(tmp, ubuf, len))
3936 		return -EFAULT;
3937 
3938 	tmp[len] = '\0';
3939 
3940 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3941 		     &new[0], &new[1], &new[2], &new[3],
3942 		     &new[4], &new[5], &new[6], &new[7]);
3943 	if (ret != num_levels)
3944 		return -EINVAL;
3945 
3946 	drm_modeset_lock_all(dev);
3947 
3948 	for (level = 0; level < num_levels; level++)
3949 		wm[level] = new[level];
3950 
3951 	drm_modeset_unlock_all(dev);
3952 
3953 	return len;
3954 }
3955 
3956 
3957 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3958 				    size_t len, loff_t *offp)
3959 {
3960 	struct seq_file *m = file->private_data;
3961 	struct drm_i915_private *dev_priv = m->private;
3962 	uint16_t *latencies;
3963 
3964 	if (INTEL_GEN(dev_priv) >= 9)
3965 		latencies = dev_priv->wm.skl_latency;
3966 	else
3967 		latencies = dev_priv->wm.pri_latency;
3968 
3969 	return wm_latency_write(file, ubuf, len, offp, latencies);
3970 }
3971 
3972 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3973 				    size_t len, loff_t *offp)
3974 {
3975 	struct seq_file *m = file->private_data;
3976 	struct drm_i915_private *dev_priv = m->private;
3977 	uint16_t *latencies;
3978 
3979 	if (INTEL_GEN(dev_priv) >= 9)
3980 		latencies = dev_priv->wm.skl_latency;
3981 	else
3982 		latencies = dev_priv->wm.spr_latency;
3983 
3984 	return wm_latency_write(file, ubuf, len, offp, latencies);
3985 }
3986 
3987 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3988 				    size_t len, loff_t *offp)
3989 {
3990 	struct seq_file *m = file->private_data;
3991 	struct drm_i915_private *dev_priv = m->private;
3992 	uint16_t *latencies;
3993 
3994 	if (INTEL_GEN(dev_priv) >= 9)
3995 		latencies = dev_priv->wm.skl_latency;
3996 	else
3997 		latencies = dev_priv->wm.cur_latency;
3998 
3999 	return wm_latency_write(file, ubuf, len, offp, latencies);
4000 }
4001 
4002 static const struct file_operations i915_pri_wm_latency_fops = {
4003 	.owner = THIS_MODULE,
4004 	.open = pri_wm_latency_open,
4005 	.read = seq_read,
4006 	.llseek = seq_lseek,
4007 	.release = single_release,
4008 	.write = pri_wm_latency_write
4009 };
4010 
4011 static const struct file_operations i915_spr_wm_latency_fops = {
4012 	.owner = THIS_MODULE,
4013 	.open = spr_wm_latency_open,
4014 	.read = seq_read,
4015 	.llseek = seq_lseek,
4016 	.release = single_release,
4017 	.write = spr_wm_latency_write
4018 };
4019 
4020 static const struct file_operations i915_cur_wm_latency_fops = {
4021 	.owner = THIS_MODULE,
4022 	.open = cur_wm_latency_open,
4023 	.read = seq_read,
4024 	.llseek = seq_lseek,
4025 	.release = single_release,
4026 	.write = cur_wm_latency_write
4027 };
4028 
4029 static int
4030 i915_wedged_get(void *data, u64 *val)
4031 {
4032 	struct drm_i915_private *dev_priv = data;
4033 
4034 	*val = i915_terminally_wedged(&dev_priv->gpu_error);
4035 
4036 	return 0;
4037 }
4038 
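/*
 * Writing an engine mask declares those engines hung and triggers error
 * handling. A minimal example (path assumed; bit 0 is the render engine):
 *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */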
4039 static int
4040 i915_wedged_set(void *data, u64 val)
4041 {
4042 	struct drm_i915_private *i915 = data;
4043 	struct intel_engine_cs *engine;
4044 	unsigned int tmp;
4045 
4046 	/*
4047 	 * There is no safeguard against this debugfs entry colliding
4048 	 * with the hangcheck calling the same i915_handle_error() in
4049 	 * parallel, causing an explosion. For now we assume that the
4050 	 * test harness is responsible enough not to inject gpu hangs
4051 	 * while it is writing to 'i915_wedged'.
4052 	 */
4053 
4054 	if (i915_reset_backoff(&i915->gpu_error))
4055 		return -EAGAIN;
4056 
4057 	for_each_engine_masked(engine, i915, val, tmp) {
4058 		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4059 		engine->hangcheck.stalled = true;
4060 	}
4061 
4062 	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
4063 			  "Manually set wedged engine mask = %llx", val);
4064 
4065 	wait_on_bit(&i915->gpu_error.flags,
4066 		    I915_RESET_HANDOFF,
4067 		    TASK_UNINTERRUPTIBLE);
4068 
4069 	return 0;
4070 }
4071 
4072 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4073 			i915_wedged_get, i915_wedged_set,
4074 			"%llu\n");
4075 
4076 static int
4077 fault_irq_set(struct drm_i915_private *i915,
4078 	      unsigned long *irq,
4079 	      unsigned long val)
4080 {
4081 	int err;
4082 
4083 	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4084 	if (err)
4085 		return err;
4086 
4087 	err = i915_gem_wait_for_idle(i915,
4088 				     I915_WAIT_LOCKED |
4089 				     I915_WAIT_INTERRUPTIBLE);
4090 	if (err)
4091 		goto err_unlock;
4092 
4093 	*irq = val;
4094 	mutex_unlock(&i915->drm.struct_mutex);
4095 
4096 	/* Flush idle worker to disarm irq */
4097 	drain_delayed_work(&i915->gt.idle_work);
4098 
4099 	return 0;
4100 
4101 err_unlock:
4102 	mutex_unlock(&i915->drm.struct_mutex);
4103 	return err;
4104 }
4105 
4106 static int
4107 i915_ring_missed_irq_get(void *data, u64 *val)
4108 {
4109 	struct drm_i915_private *dev_priv = data;
4110 
4111 	*val = dev_priv->gpu_error.missed_irq_rings;
4112 	return 0;
4113 }
4114 
4115 static int
4116 i915_ring_missed_irq_set(void *data, u64 val)
4117 {
4118 	struct drm_i915_private *i915 = data;
4119 
4120 	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
4121 }
4122 
4123 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4124 			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4125 			"0x%08llx\n");
4126 
4127 static int
4128 i915_ring_test_irq_get(void *data, u64 *val)
4129 {
4130 	struct drm_i915_private *dev_priv = data;
4131 
4132 	*val = dev_priv->gpu_error.test_irq_rings;
4133 
4134 	return 0;
4135 }
4136 
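/*
 * Usage sketch (path assumed): writing an engine mask, e.g.
 *   echo 0x1 > /sys/kernel/debug/dri/0/i915_ring_test_irq
 * suppresses user interrupts on those rings so that test harnesses can
 * exercise the missed-irq fallback paths.
 */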
4137 static int
4138 i915_ring_test_irq_set(void *data, u64 val)
4139 {
4140 	struct drm_i915_private *i915 = data;
4141 
4142 	val &= INTEL_INFO(i915)->ring_mask;
4143 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4144 
4145 	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
4146 }
4147 
4148 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4149 			i915_ring_test_irq_get, i915_ring_test_irq_set,
4150 			"0x%08llx\n");
4151 
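/*
 * Cache-dropping bits accepted by i915_gem_drop_caches. For example
 * (path assumed), 0x07 = DROP_UNBOUND | DROP_BOUND | DROP_RETIRE:
 *   echo 0x07 > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */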
4152 #define DROP_UNBOUND	BIT(0)
4153 #define DROP_BOUND	BIT(1)
4154 #define DROP_RETIRE	BIT(2)
4155 #define DROP_ACTIVE	BIT(3)
4156 #define DROP_FREED	BIT(4)
4157 #define DROP_SHRINK_ALL	BIT(5)
4158 #define DROP_IDLE	BIT(6)
4159 #define DROP_ALL (DROP_UNBOUND	| \
4160 		  DROP_BOUND	| \
4161 		  DROP_RETIRE	| \
4162 		  DROP_ACTIVE	| \
4163 		  DROP_FREED	| \
4164 		  DROP_SHRINK_ALL | \
4165 		  DROP_IDLE)
4166 static int
4167 i915_drop_caches_get(void *data, u64 *val)
4168 {
4169 	*val = DROP_ALL;
4170 
4171 	return 0;
4172 }
4173 
4174 static int
4175 i915_drop_caches_set(void *data, u64 val)
4176 {
4177 	struct drm_i915_private *dev_priv = data;
4178 	struct drm_device *dev = &dev_priv->drm;
4179 	int ret = 0;
4180 
4181 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4182 		  val, val & DROP_ALL);
4183 
4184 	/* No need to check and wait for gpu resets; only libdrm auto-restarts
4185 	 * ioctls on -EAGAIN. */
4186 	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
4187 		ret = mutex_lock_interruptible(&dev->struct_mutex);
4188 		if (ret)
4189 			return ret;
4190 
4191 		if (val & DROP_ACTIVE)
4192 			ret = i915_gem_wait_for_idle(dev_priv,
4193 						     I915_WAIT_INTERRUPTIBLE |
4194 						     I915_WAIT_LOCKED);
4195 
4196 		if (val & DROP_RETIRE)
4197 			i915_retire_requests(dev_priv);
4198 
4199 		mutex_unlock(&dev->struct_mutex);
4200 	}
4201 
4202 	fs_reclaim_acquire(GFP_KERNEL);
4203 	if (val & DROP_BOUND)
4204 		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
4205 
4206 	if (val & DROP_UNBOUND)
4207 		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
4208 
4209 	if (val & DROP_SHRINK_ALL)
4210 		i915_gem_shrink_all(dev_priv);
4211 	fs_reclaim_release(GFP_KERNEL);
4212 
4213 	if (val & DROP_IDLE) {
4214 		do {
4215 			if (READ_ONCE(dev_priv->gt.active_requests))
4216 				flush_delayed_work(&dev_priv->gt.retire_work);
4217 			drain_delayed_work(&dev_priv->gt.idle_work);
4218 		} while (READ_ONCE(dev_priv->gt.awake));
4219 	}
4220 
4221 	if (val & DROP_FREED)
4222 		i915_gem_drain_freed_objects(dev_priv);
4223 
4224 	return ret;
4225 }
4226 
4227 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4228 			i915_drop_caches_get, i915_drop_caches_set,
4229 			"0x%08llx\n");
4230 
4231 static int
4232 i915_cache_sharing_get(void *data, u64 *val)
4233 {
4234 	struct drm_i915_private *dev_priv = data;
4235 	u32 snpcr;
4236 
4237 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4238 		return -ENODEV;
4239 
4240 	intel_runtime_pm_get(dev_priv);
4241 
4242 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4243 
4244 	intel_runtime_pm_put(dev_priv);
4245 
4246 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4247 
4248 	return 0;
4249 }
4250 
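/*
 * Accepts values 0-3, written to the cache sharing policy field of
 * GEN6_MBCUNIT_SNPCR. Example (path assumed):
 *   echo 3 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */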
4251 static int
4252 i915_cache_sharing_set(void *data, u64 val)
4253 {
4254 	struct drm_i915_private *dev_priv = data;
4255 	u32 snpcr;
4256 
4257 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4258 		return -ENODEV;
4259 
4260 	if (val > 3)
4261 		return -EINVAL;
4262 
4263 	intel_runtime_pm_get(dev_priv);
4264 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4265 
4266 	/* Update the cache sharing policy here as well */
4267 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4268 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
4269 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4270 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4271 
4272 	intel_runtime_pm_put(dev_priv);
4273 	return 0;
4274 }
4275 
4276 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4277 			i915_cache_sharing_get, i915_cache_sharing_set,
4278 			"%llu\n");
4279 
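/*
 * The *_sseu_device_status() helpers below sample hardware status
 * registers to report which slices/subslices/EUs are currently powered,
 * as opposed to the static capabilities in intel_device_info.
 */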
4280 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4281 					  struct sseu_dev_info *sseu)
4282 {
4283 #define SS_MAX 2
4284 	const int ss_max = SS_MAX;
4285 	u32 sig1[SS_MAX], sig2[SS_MAX];
4286 	int ss;
4287 
4288 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4289 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4290 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4291 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4292 
4293 	for (ss = 0; ss < ss_max; ss++) {
4294 		unsigned int eu_cnt;
4295 
4296 		if (sig1[ss] & CHV_SS_PG_ENABLE)
4297 			/* skip disabled subslice */
4298 			continue;
4299 
4300 		sseu->slice_mask = BIT(0);
4301 		sseu->subslice_mask[0] |= BIT(ss);
4302 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4303 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4304 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4305 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4306 		sseu->eu_total += eu_cnt;
4307 		sseu->eu_per_subslice = max_t(unsigned int,
4308 					      sseu->eu_per_subslice, eu_cnt);
4309 	}
4310 #undef SS_MAX
4311 }
4312 
4313 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4314 				     struct sseu_dev_info *sseu)
4315 {
4316 #define SS_MAX 6
4317 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4318 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4319 	int s, ss;
4320 
4321 	for (s = 0; s < info->sseu.max_slices; s++) {
4322 		/*
4323 		 * FIXME: Valid SS Mask respects the spec and reads
4324 		 * only valid bits for those registers, excluding reserved
4325 		 * bits, although this seems wrong because it would leave
4326 		 * many subslices without an ACK.
4327 		 */
4328 		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4329 			GEN10_PGCTL_VALID_SS_MASK(s);
4330 		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4331 		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4332 	}
4333 
4334 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4335 		     GEN9_PGCTL_SSA_EU19_ACK |
4336 		     GEN9_PGCTL_SSA_EU210_ACK |
4337 		     GEN9_PGCTL_SSA_EU311_ACK;
4338 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4339 		     GEN9_PGCTL_SSB_EU19_ACK |
4340 		     GEN9_PGCTL_SSB_EU210_ACK |
4341 		     GEN9_PGCTL_SSB_EU311_ACK;
4342 
4343 	for (s = 0; s < info->sseu.max_slices; s++) {
4344 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4345 			/* skip disabled slice */
4346 			continue;
4347 
4348 		sseu->slice_mask |= BIT(s);
4349 		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
4350 
4351 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4352 			unsigned int eu_cnt;
4353 
4354 			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4355 				/* skip disabled subslice */
4356 				continue;
4357 
4358 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4359 					       eu_mask[ss % 2]);
4360 			sseu->eu_total += eu_cnt;
4361 			sseu->eu_per_subslice = max_t(unsigned int,
4362 						      sseu->eu_per_subslice,
4363 						      eu_cnt);
4364 		}
4365 	}
4366 #undef SS_MAX
4367 }
4368 
4369 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4370 				    struct sseu_dev_info *sseu)
4371 {
4372 #define SS_MAX 3
4373 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4374 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4375 	int s, ss;
4376 
4377 	for (s = 0; s < info->sseu.max_slices; s++) {
4378 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4379 		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4380 		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4381 	}
4382 
4383 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4384 		     GEN9_PGCTL_SSA_EU19_ACK |
4385 		     GEN9_PGCTL_SSA_EU210_ACK |
4386 		     GEN9_PGCTL_SSA_EU311_ACK;
4387 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4388 		     GEN9_PGCTL_SSB_EU19_ACK |
4389 		     GEN9_PGCTL_SSB_EU210_ACK |
4390 		     GEN9_PGCTL_SSB_EU311_ACK;
4391 
4392 	for (s = 0; s < info->sseu.max_slices; s++) {
4393 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4394 			/* skip disabled slice */
4395 			continue;
4396 
4397 		sseu->slice_mask |= BIT(s);
4398 
4399 		if (IS_GEN9_BC(dev_priv))
4400 			sseu->subslice_mask[s] =
4401 				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4402 
4403 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4404 			unsigned int eu_cnt;
4405 
4406 			if (IS_GEN9_LP(dev_priv)) {
4407 				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4408 					/* skip disabled subslice */
4409 					continue;
4410 
4411 				sseu->subslice_mask[s] |= BIT(ss);
4412 			}
4413 
4414 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4415 					       eu_mask[ss % 2]);
4416 			sseu->eu_total += eu_cnt;
4417 			sseu->eu_per_subslice = max_t(unsigned int,
4418 						      sseu->eu_per_subslice,
4419 						      eu_cnt);
4420 		}
4421 	}
4422 #undef SS_MAX
4423 }
4424 
4425 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4426 					 struct sseu_dev_info *sseu)
4427 {
4428 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4429 	int s;
4430 
4431 	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4432 
4433 	if (sseu->slice_mask) {
4434 		sseu->eu_per_subslice =
4435 				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
4436 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4437 			sseu->subslice_mask[s] =
4438 				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4439 		}
4440 		sseu->eu_total = sseu->eu_per_subslice *
4441 				 sseu_subslice_total(sseu);
4442 
4443 		/* subtract fused off EU(s) from enabled slice(s) */
4444 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4445 			u8 subslice_7eu =
4446 				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
4447 
4448 			sseu->eu_total -= hweight8(subslice_7eu);
4449 		}
4450 	}
4451 }
4452 
4453 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4454 				 const struct sseu_dev_info *sseu)
4455 {
4456 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4457 	const char *type = is_available_info ? "Available" : "Enabled";
4458 	int s;
4459 
4460 	seq_printf(m, "  %s Slice Mask: %04x\n", type,
4461 		   sseu->slice_mask);
4462 	seq_printf(m, "  %s Slice Total: %u\n", type,
4463 		   hweight8(sseu->slice_mask));
4464 	seq_printf(m, "  %s Subslice Total: %u\n", type,
4465 		   sseu_subslice_total(sseu));
4466 	for (s = 0; s < fls(sseu->slice_mask); s++) {
4467 		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4468 			   s, hweight8(sseu->subslice_mask[s]));
4469 	}
4470 	seq_printf(m, "  %s EU Total: %u\n", type,
4471 		   sseu->eu_total);
4472 	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4473 		   sseu->eu_per_subslice);
4474 
4475 	if (!is_available_info)
4476 		return;
4477 
4478 	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4479 	if (HAS_POOLED_EU(dev_priv))
4480 		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4481 
4482 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4483 		   yesno(sseu->has_slice_pg));
4484 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4485 		   yesno(sseu->has_subslice_pg));
4486 	seq_printf(m, "  Has EU Power Gating: %s\n",
4487 		   yesno(sseu->has_eu_pg));
4488 }
4489 
4490 static int i915_sseu_status(struct seq_file *m, void *unused)
4491 {
4492 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4493 	struct sseu_dev_info sseu;
4494 
4495 	if (INTEL_GEN(dev_priv) < 8)
4496 		return -ENODEV;
4497 
4498 	seq_puts(m, "SSEU Device Info\n");
4499 	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
4500 
4501 	seq_puts(m, "SSEU Device Status\n");
4502 	memset(&sseu, 0, sizeof(sseu));
4503 	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4504 	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4505 	sseu.max_eus_per_subslice =
4506 		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
4507 
4508 	intel_runtime_pm_get(dev_priv);
4509 
4510 	if (IS_CHERRYVIEW(dev_priv)) {
4511 		cherryview_sseu_device_status(dev_priv, &sseu);
4512 	} else if (IS_BROADWELL(dev_priv)) {
4513 		broadwell_sseu_device_status(dev_priv, &sseu);
4514 	} else if (IS_GEN9(dev_priv)) {
4515 		gen9_sseu_device_status(dev_priv, &sseu);
4516 	} else if (INTEL_GEN(dev_priv) >= 10) {
4517 		gen10_sseu_device_status(dev_priv, &sseu);
4518 	}
4519 
4520 	intel_runtime_pm_put(dev_priv);
4521 
4522 	i915_print_sseu_info(m, false, &sseu);
4523 
4524 	return 0;
4525 }
4526 
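/*
 * Keeping i915_forcewake_user open holds a runtime-PM reference and all
 * forcewake domains, keeping the GT awake; both are dropped on close.
 */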
4527 static int i915_forcewake_open(struct inode *inode, struct file *file)
4528 {
4529 	struct drm_i915_private *i915 = inode->i_private;
4530 
4531 	if (INTEL_GEN(i915) < 6)
4532 		return 0;
4533 
4534 	intel_runtime_pm_get(i915);
4535 	intel_uncore_forcewake_user_get(i915);
4536 
4537 	return 0;
4538 }
4539 
4540 static int i915_forcewake_release(struct inode *inode, struct file *file)
4541 {
4542 	struct drm_i915_private *i915 = inode->i_private;
4543 
4544 	if (INTEL_GEN(i915) < 6)
4545 		return 0;
4546 
4547 	intel_uncore_forcewake_user_put(i915);
4548 	intel_runtime_pm_put(i915);
4549 
4550 	return 0;
4551 }
4552 
4553 static const struct file_operations i915_forcewake_fops = {
4554 	.owner = THIS_MODULE,
4555 	.open = i915_forcewake_open,
4556 	.release = i915_forcewake_release,
4557 };
4558 
4559 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4560 {
4561 	struct drm_i915_private *dev_priv = m->private;
4562 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4563 
4564 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4565 	seq_printf(m, "Detected: %s\n",
4566 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4567 
4568 	return 0;
4569 }
4570 
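/*
 * Accepts a decimal threshold, or the literal "reset" to restore
 * HPD_STORM_DEFAULT_THRESHOLD; 0 disables storm detection. Example
 * (path assumed):
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */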
4571 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4572 					const char __user *ubuf, size_t len,
4573 					loff_t *offp)
4574 {
4575 	struct seq_file *m = file->private_data;
4576 	struct drm_i915_private *dev_priv = m->private;
4577 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4578 	unsigned int new_threshold;
4579 	int i;
4580 	char *newline;
4581 	char tmp[16];
4582 
4583 	if (len >= sizeof(tmp))
4584 		return -EINVAL;
4585 
4586 	if (copy_from_user(tmp, ubuf, len))
4587 		return -EFAULT;
4588 
4589 	tmp[len] = '\0';
4590 
4591 	/* Strip newline, if any */
4592 	newline = strchr(tmp, '\n');
4593 	if (newline)
4594 		*newline = '\0';
4595 
4596 	if (strcmp(tmp, "reset") == 0)
4597 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4598 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4599 		return -EINVAL;
4600 
4601 	if (new_threshold > 0)
4602 		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4603 			      new_threshold);
4604 	else
4605 		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4606 
4607 	spin_lock_irq(&dev_priv->irq_lock);
4608 	hotplug->hpd_storm_threshold = new_threshold;
4609 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4610 	for_each_hpd_pin(i)
4611 		hotplug->stats[i].count = 0;
4612 	spin_unlock_irq(&dev_priv->irq_lock);
4613 
4614 	/* Re-enable hpd immediately if we were in an irq storm */
4615 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4616 
4617 	return len;
4618 }
4619 
4620 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4621 {
4622 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4623 }
4624 
4625 static const struct file_operations i915_hpd_storm_ctl_fops = {
4626 	.owner = THIS_MODULE,
4627 	.open = i915_hpd_storm_ctl_open,
4628 	.read = seq_read,
4629 	.llseek = seq_lseek,
4630 	.release = single_release,
4631 	.write = i915_hpd_storm_ctl_write
4632 };
4633 
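/*
 * Manually toggles DRRS on the eDP outputs of every active CRTC with
 * DRRS support: any non-zero value enables, zero disables. Example
 * (path assumed):
 *   echo 0 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */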
4634 static int i915_drrs_ctl_set(void *data, u64 val)
4635 {
4636 	struct drm_i915_private *dev_priv = data;
4637 	struct drm_device *dev = &dev_priv->drm;
4638 	struct intel_crtc *intel_crtc;
4639 	struct intel_encoder *encoder;
4640 	struct intel_dp *intel_dp;
4641 
4642 	if (INTEL_GEN(dev_priv) < 7)
4643 		return -ENODEV;
4644 
4645 	drm_modeset_lock_all(dev);
4646 	for_each_intel_crtc(dev, intel_crtc) {
4647 		if (!intel_crtc->base.state->active ||
4648 					!intel_crtc->config->has_drrs)
4649 			continue;
4650 
4651 		for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4652 			if (encoder->type != INTEL_OUTPUT_EDP)
4653 				continue;
4654 
4655 			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4656 						val ? "en" : "dis", val);
4657 
4658 			intel_dp = enc_to_intel_dp(&encoder->base);
4659 			if (val)
4660 				intel_edp_drrs_enable(intel_dp,
4661 							intel_crtc->config);
4662 			else
4663 				intel_edp_drrs_disable(intel_dp,
4664 							intel_crtc->config);
4665 		}
4666 	}
4667 	drm_modeset_unlock_all(dev);
4668 
4669 	return 0;
4670 }
4671 
4672 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4673 
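/*
 * Re-arms FIFO underrun reporting once an underrun has been reported and
 * suppressed. Any truthy kstrtobool input triggers it (path assumed):
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */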
4674 static ssize_t
4675 i915_fifo_underrun_reset_write(struct file *filp,
4676 			       const char __user *ubuf,
4677 			       size_t cnt, loff_t *ppos)
4678 {
4679 	struct drm_i915_private *dev_priv = filp->private_data;
4680 	struct intel_crtc *intel_crtc;
4681 	struct drm_device *dev = &dev_priv->drm;
4682 	int ret;
4683 	bool reset;
4684 
4685 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
4686 	if (ret)
4687 		return ret;
4688 
4689 	if (!reset)
4690 		return cnt;
4691 
4692 	for_each_intel_crtc(dev, intel_crtc) {
4693 		struct drm_crtc_commit *commit;
4694 		struct intel_crtc_state *crtc_state;
4695 
4696 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4697 		if (ret)
4698 			return ret;
4699 
4700 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4701 		commit = crtc_state->base.commit;
4702 		if (commit) {
4703 			ret = wait_for_completion_interruptible(&commit->hw_done);
4704 			if (!ret)
4705 				ret = wait_for_completion_interruptible(&commit->flip_done);
4706 		}
4707 
4708 		if (!ret && crtc_state->base.active) {
4709 			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4710 				      pipe_name(intel_crtc->pipe));
4711 
4712 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4713 		}
4714 
4715 		drm_modeset_unlock(&intel_crtc->base.mutex);
4716 
4717 		if (ret)
4718 			return ret;
4719 	}
4720 
4721 	ret = intel_fbc_reset_underrun(dev_priv);
4722 	if (ret)
4723 		return ret;
4724 
4725 	return cnt;
4726 }
4727 
4728 static const struct file_operations i915_fifo_underrun_reset_ops = {
4729 	.owner = THIS_MODULE,
4730 	.open = simple_open,
4731 	.write = i915_fifo_underrun_reset_write,
4732 	.llseek = default_llseek,
4733 };
4734 
4735 static const struct drm_info_list i915_debugfs_list[] = {
4736 	{"i915_capabilities", i915_capabilities, 0},
4737 	{"i915_gem_objects", i915_gem_object_info, 0},
4738 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
4739 	{"i915_gem_stolen", i915_gem_stolen_list_info, 0},
4740 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4741 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4742 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4743 	{"i915_guc_info", i915_guc_info, 0},
4744 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4745 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4746 	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4747 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4748 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4749 	{"i915_frequency_info", i915_frequency_info, 0},
4750 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4751 	{"i915_reset_info", i915_reset_info, 0},
4752 	{"i915_drpc_info", i915_drpc_info, 0},
4753 	{"i915_emon_status", i915_emon_status, 0},
4754 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4755 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4756 	{"i915_fbc_status", i915_fbc_status, 0},
4757 	{"i915_ips_status", i915_ips_status, 0},
4758 	{"i915_sr_status", i915_sr_status, 0},
4759 	{"i915_opregion", i915_opregion, 0},
4760 	{"i915_vbt", i915_vbt, 0},
4761 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4762 	{"i915_context_status", i915_context_status, 0},
4763 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4764 	{"i915_swizzle_info", i915_swizzle_info, 0},
4765 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
4766 	{"i915_llc", i915_llc, 0},
4767 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4768 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
4769 	{"i915_energy_uJ", i915_energy_uJ, 0},
4770 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4771 	{"i915_power_domain_info", i915_power_domain_info, 0},
4772 	{"i915_dmc_info", i915_dmc_info, 0},
4773 	{"i915_display_info", i915_display_info, 0},
4774 	{"i915_engine_info", i915_engine_info, 0},
4775 	{"i915_rcs_topology", i915_rcs_topology, 0},
4776 	{"i915_shrinker_info", i915_shrinker_info, 0},
4777 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4778 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4779 	{"i915_wa_registers", i915_wa_registers, 0},
4780 	{"i915_ddb_info", i915_ddb_info, 0},
4781 	{"i915_sseu_status", i915_sseu_status, 0},
4782 	{"i915_drrs_status", i915_drrs_status, 0},
4783 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4784 };
4785 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4786 
4787 static const struct i915_debugfs_files {
4788 	const char *name;
4789 	const struct file_operations *fops;
4790 } i915_debugfs_files[] = {
4791 	{"i915_wedged", &i915_wedged_fops},
4792 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4793 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4794 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
4795 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4796 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4797 	{"i915_error_state", &i915_error_state_fops},
4798 	{"i915_gpu_info", &i915_gpu_info_fops},
4799 #endif
4800 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4801 	{"i915_next_seqno", &i915_next_seqno_fops},
4802 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
4803 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4804 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4805 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4806 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4807 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4808 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4809 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4810 	{"i915_guc_log_level", &i915_guc_log_level_fops},
4811 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
4812 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4813 	{"i915_ipc_status", &i915_ipc_status_fops},
4814 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
4815 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4816 };
4817 
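/*
 * Registers all of the entries above under the DRM minor's debugfs
 * directory, typically /sys/kernel/debug/dri/<minor> (path assumed).
 */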
4818 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4819 {
4820 	struct drm_minor *minor = dev_priv->drm.primary;
4821 	struct dentry *ent;
4822 	int ret, i;
4823 
4824 	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4825 				  minor->debugfs_root, to_i915(minor->dev),
4826 				  &i915_forcewake_fops);
4827 	if (!ent)
4828 		return -ENOMEM;
4829 
4830 	ret = intel_pipe_crc_create(minor);
4831 	if (ret)
4832 		return ret;
4833 
4834 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4835 		ent = debugfs_create_file(i915_debugfs_files[i].name,
4836 					  S_IRUGO | S_IWUSR,
4837 					  minor->debugfs_root,
4838 					  to_i915(minor->dev),
4839 					  i915_debugfs_files[i].fops);
4840 		if (!ent)
4841 			return -ENOMEM;
4842 	}
4843 
4844 	return drm_debugfs_create_files(i915_debugfs_list,
4845 					I915_DEBUGFS_ENTRIES,
4846 					minor->debugfs_root, minor);
4847 }
4848 
4849 struct dpcd_block {
4850 	/* DPCD dump start address. */
4851 	unsigned int offset;
4852 	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4853 	unsigned int end;
4854 	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4855 	size_t size;
4856 	/* Only valid for eDP. */
4857 	bool edp;
4858 };
4859 
4860 static const struct dpcd_block i915_dpcd_debug[] = {
4861 	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4862 	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4863 	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4864 	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4865 	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4866 	{ .offset = DP_SET_POWER },
4867 	{ .offset = DP_EDP_DPCD_REV },
4868 	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4869 	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4870 	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4871 };
4872 
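/*
 * Dumps the DPCD ranges listed in i915_dpcd_debug for a connected DP/eDP
 * connector, one "offset: bytes" line per block. Readable via e.g.
 * /sys/kernel/debug/dri/0/DP-1/i915_dpcd (connector name varies).
 */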
4873 static int i915_dpcd_show(struct seq_file *m, void *data)
4874 {
4875 	struct drm_connector *connector = m->private;
4876 	struct intel_dp *intel_dp =
4877 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4878 	uint8_t buf[16];
4879 	ssize_t err;
4880 	int i;
4881 
4882 	if (connector->status != connector_status_connected)
4883 		return -ENODEV;
4884 
4885 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4886 		const struct dpcd_block *b = &i915_dpcd_debug[i];
4887 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4888 
4889 		if (b->edp &&
4890 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4891 			continue;
4892 
4893 		/* low tech for now */
4894 		if (WARN_ON(size > sizeof(buf)))
4895 			continue;
4896 
4897 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4898 		if (err <= 0) {
4899 			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4900 				  size, b->offset, err);
4901 			continue;
4902 		}
4903 
4904 		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
4905 	}
4906 
4907 	return 0;
4908 }
4909 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4910 
4911 static int i915_panel_show(struct seq_file *m, void *data)
4912 {
4913 	struct drm_connector *connector = m->private;
4914 	struct intel_dp *intel_dp =
4915 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4916 
4917 	if (connector->status != connector_status_connected)
4918 		return -ENODEV;
4919 
4920 	seq_printf(m, "Panel power up delay: %d\n",
4921 		   intel_dp->panel_power_up_delay);
4922 	seq_printf(m, "Panel power down delay: %d\n",
4923 		   intel_dp->panel_power_down_delay);
4924 	seq_printf(m, "Backlight on delay: %d\n",
4925 		   intel_dp->backlight_on_delay);
4926 	seq_printf(m, "Backlight off delay: %d\n",
4927 		   intel_dp->backlight_off_delay);
4928 
4929 	return 0;
4930 }
4931 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4932 
4933 /**
4934  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4935  * @connector: pointer to a registered drm_connector
4936  *
4937  * Cleanup will be done by drm_connector_unregister() through a call to
4938  * drm_debugfs_connector_remove().
4939  *
4940  * Returns 0 on success, negative error codes on error.
4941  */
4942 int i915_debugfs_connector_add(struct drm_connector *connector)
4943 {
4944 	struct dentry *root = connector->debugfs_entry;
4945 
4946 	/* The connector must have been registered beforehand. */
4947 	if (!root)
4948 		return -ENODEV;
4949 
4950 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4951 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4952 		debugfs_create_file("i915_dpcd", S_IRUGO, root,
4953 				    connector, &i915_dpcd_fops);
4954 
4955 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4956 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4957 				    connector, &i915_panel_fops);
4958 
4959 	return 0;
4960 }
4961