1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28 
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34 
35 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36 {
37 	return to_i915(node->minor->dev);
38 }
39 
40 static int i915_capabilities(struct seq_file *m, void *data)
41 {
42 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
44 	struct drm_printer p = drm_seq_file_printer(m);
45 
46 	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
47 	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
48 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
49 
50 	intel_device_info_dump_flags(info, &p);
51 	intel_device_info_dump_runtime(info, &p);
52 	intel_driver_caps_print(&dev_priv->caps, &p);
53 
54 	kernel_param_lock(THIS_MODULE);
55 	i915_params_dump(&i915_modparams, &p);
56 	kernel_param_unlock(THIS_MODULE);
57 
58 	return 0;
59 }
60 
61 static char get_active_flag(struct drm_i915_gem_object *obj)
62 {
63 	return i915_gem_object_is_active(obj) ? '*' : ' ';
64 }
65 
66 static char get_pin_flag(struct drm_i915_gem_object *obj)
67 {
68 	return obj->pin_global ? 'p' : ' ';
69 }
70 
71 static char get_tiling_flag(struct drm_i915_gem_object *obj)
72 {
73 	switch (i915_gem_object_get_tiling(obj)) {
74 	default:
75 	case I915_TILING_NONE: return ' ';
76 	case I915_TILING_X: return 'X';
77 	case I915_TILING_Y: return 'Y';
78 	}
79 }
80 
81 static char get_global_flag(struct drm_i915_gem_object *obj)
82 {
83 	return obj->userfault_count ? 'g' : ' ';
84 }
85 
86 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
87 {
88 	return obj->mm.mapping ? 'M' : ' ';
89 }
90 
91 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92 {
93 	u64 size = 0;
94 	struct i915_vma *vma;
95 
96 	for_each_ggtt_vma(vma, obj) {
97 		if (drm_mm_node_allocated(&vma->node))
98 			size += vma->node.size;
99 	}
100 
101 	return size;
102 }
103 
104 static const char *
105 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106 {
107 	size_t x = 0;
108 
109 	switch (page_sizes) {
110 	case 0:
111 		return "";
112 	case I915_GTT_PAGE_SIZE_4K:
113 		return "4K";
114 	case I915_GTT_PAGE_SIZE_64K:
115 		return "64K";
116 	case I915_GTT_PAGE_SIZE_2M:
117 		return "2M";
118 	default:
119 		if (!buf)
120 			return "M";
121 
122 		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 			x += snprintf(buf + x, len - x, "2M, ");
124 		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 			x += snprintf(buf + x, len - x, "64K, ");
126 		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 			x += snprintf(buf + x, len - x, "4K, ");
128 		buf[x-2] = '\0';
129 
130 		return buf;
131 	}
132 }
133 
134 static void
135 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
136 {
137 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
138 	struct intel_engine_cs *engine;
139 	struct i915_vma *vma;
140 	unsigned int frontbuffer_bits;
141 	int pin_count = 0;
142 
143 	lockdep_assert_held(&obj->base.dev->struct_mutex);
144 
145 	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
146 		   &obj->base,
147 		   get_active_flag(obj),
148 		   get_pin_flag(obj),
149 		   get_tiling_flag(obj),
150 		   get_global_flag(obj),
151 		   get_pin_mapped_flag(obj),
152 		   obj->base.size / 1024,
153 		   obj->read_domains,
154 		   obj->write_domain,
155 		   i915_cache_level_str(dev_priv, obj->cache_level),
156 		   obj->mm.dirty ? " dirty" : "",
157 		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
158 	if (obj->base.name)
159 		seq_printf(m, " (name: %d)", obj->base.name);
160 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
161 		if (i915_vma_is_pinned(vma))
162 			pin_count++;
163 	}
164 	seq_printf(m, " (pinned x %d)", pin_count);
165 	if (obj->pin_global)
166 		seq_printf(m, " (global)");
167 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
168 		if (!drm_mm_node_allocated(&vma->node))
169 			continue;
170 
171 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
172 			   i915_vma_is_ggtt(vma) ? "g" : "pp",
173 			   vma->node.start, vma->node.size,
174 			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
175 		if (i915_vma_is_ggtt(vma)) {
176 			switch (vma->ggtt_view.type) {
177 			case I915_GGTT_VIEW_NORMAL:
178 				seq_puts(m, ", normal");
179 				break;
180 
181 			case I915_GGTT_VIEW_PARTIAL:
182 				seq_printf(m, ", partial [%08llx+%x]",
183 					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
184 					   vma->ggtt_view.partial.size << PAGE_SHIFT);
185 				break;
186 
187 			case I915_GGTT_VIEW_ROTATED:
188 				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
189 					   vma->ggtt_view.rotated.plane[0].width,
190 					   vma->ggtt_view.rotated.plane[0].height,
191 					   vma->ggtt_view.rotated.plane[0].stride,
192 					   vma->ggtt_view.rotated.plane[0].offset,
193 					   vma->ggtt_view.rotated.plane[1].width,
194 					   vma->ggtt_view.rotated.plane[1].height,
195 					   vma->ggtt_view.rotated.plane[1].stride,
196 					   vma->ggtt_view.rotated.plane[1].offset);
197 				break;
198 
199 			default:
200 				MISSING_CASE(vma->ggtt_view.type);
201 				break;
202 			}
203 		}
204 		if (vma->fence)
205 			seq_printf(m, " , fence: %d%s",
206 				   vma->fence->id,
207 				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
208 		seq_puts(m, ")");
209 	}
210 	if (obj->stolen)
211 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
212 
213 	engine = i915_gem_object_last_write_engine(obj);
214 	if (engine)
215 		seq_printf(m, " (%s)", engine->name);
216 
217 	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
218 	if (frontbuffer_bits)
219 		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
220 }
221 
222 static int obj_rank_by_stolen(const void *A, const void *B)
223 {
224 	const struct drm_i915_gem_object *a =
225 		*(const struct drm_i915_gem_object **)A;
226 	const struct drm_i915_gem_object *b =
227 		*(const struct drm_i915_gem_object **)B;
228 
229 	if (a->stolen->start < b->stolen->start)
230 		return -1;
231 	if (a->stolen->start > b->stolen->start)
232 		return 1;
233 	return 0;
234 }
235 
236 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
237 {
238 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
239 	struct drm_device *dev = &dev_priv->drm;
240 	struct drm_i915_gem_object **objects;
241 	struct drm_i915_gem_object *obj;
242 	u64 total_obj_size, total_gtt_size;
243 	unsigned long total, count, n;
244 	int ret;
245 
246 	total = READ_ONCE(dev_priv->mm.object_count);
247 	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
248 	if (!objects)
249 		return -ENOMEM;
250 
251 	ret = mutex_lock_interruptible(&dev->struct_mutex);
252 	if (ret)
253 		goto out;
254 
255 	total_obj_size = total_gtt_size = count = 0;
256 
257 	spin_lock(&dev_priv->mm.obj_lock);
258 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
259 		if (count == total)
260 			break;
261 
262 		if (obj->stolen == NULL)
263 			continue;
264 
265 		objects[count++] = obj;
266 		total_obj_size += obj->base.size;
267 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
268 
269 	}
270 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
271 		if (count == total)
272 			break;
273 
274 		if (obj->stolen == NULL)
275 			continue;
276 
277 		objects[count++] = obj;
278 		total_obj_size += obj->base.size;
279 	}
280 	spin_unlock(&dev_priv->mm.obj_lock);
281 
282 	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
283 
284 	seq_puts(m, "Stolen:\n");
285 	for (n = 0; n < count; n++) {
286 		seq_puts(m, "   ");
287 		describe_obj(m, objects[n]);
288 		seq_putc(m, '\n');
289 	}
290 	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
291 		   count, total_obj_size, total_gtt_size);
292 
293 	mutex_unlock(&dev->struct_mutex);
294 out:
295 	kvfree(objects);
296 	return ret;
297 }
298 
299 struct file_stats {
300 	struct drm_i915_file_private *file_priv;
301 	unsigned long count;
302 	u64 total, unbound;
303 	u64 global, shared;
304 	u64 active, inactive;
305 };
306 
307 static int per_file_stats(int id, void *ptr, void *data)
308 {
309 	struct drm_i915_gem_object *obj = ptr;
310 	struct file_stats *stats = data;
311 	struct i915_vma *vma;
312 
313 	lockdep_assert_held(&obj->base.dev->struct_mutex);
314 
315 	stats->count++;
316 	stats->total += obj->base.size;
317 	if (!obj->bind_count)
318 		stats->unbound += obj->base.size;
319 	if (obj->base.name || obj->base.dma_buf)
320 		stats->shared += obj->base.size;
321 
322 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
323 		if (!drm_mm_node_allocated(&vma->node))
324 			continue;
325 
326 		if (i915_vma_is_ggtt(vma)) {
327 			stats->global += vma->node.size;
328 		} else {
329 			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
330 
331 			if (ppgtt->base.file != stats->file_priv)
332 				continue;
333 		}
334 
335 		if (i915_vma_is_active(vma))
336 			stats->active += vma->node.size;
337 		else
338 			stats->inactive += vma->node.size;
339 	}
340 
341 	return 0;
342 }
343 
344 #define print_file_stats(m, name, stats) do { \
345 	if (stats.count) \
346 		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
347 			   name, \
348 			   stats.count, \
349 			   stats.total, \
350 			   stats.active, \
351 			   stats.inactive, \
352 			   stats.global, \
353 			   stats.shared, \
354 			   stats.unbound); \
355 } while (0)
356 
357 static void print_batch_pool_stats(struct seq_file *m,
358 				   struct drm_i915_private *dev_priv)
359 {
360 	struct drm_i915_gem_object *obj;
361 	struct file_stats stats;
362 	struct intel_engine_cs *engine;
363 	enum intel_engine_id id;
364 	int j;
365 
366 	memset(&stats, 0, sizeof(stats));
367 
368 	for_each_engine(engine, dev_priv, id) {
369 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
370 			list_for_each_entry(obj,
371 					    &engine->batch_pool.cache_list[j],
372 					    batch_pool_link)
373 				per_file_stats(0, obj, &stats);
374 		}
375 	}
376 
377 	print_file_stats(m, "[k]batch pool", stats);
378 }
379 
380 static int per_file_ctx_stats(int id, void *ptr, void *data)
381 {
382 	struct i915_gem_context *ctx = ptr;
383 	int n;
384 
385 	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
386 		if (ctx->engine[n].state)
387 			per_file_stats(0, ctx->engine[n].state->obj, data);
388 		if (ctx->engine[n].ring)
389 			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
390 	}
391 
392 	return 0;
393 }
394 
395 static void print_context_stats(struct seq_file *m,
396 				struct drm_i915_private *dev_priv)
397 {
398 	struct drm_device *dev = &dev_priv->drm;
399 	struct file_stats stats;
400 	struct drm_file *file;
401 
402 	memset(&stats, 0, sizeof(stats));
403 
404 	mutex_lock(&dev->struct_mutex);
405 	if (dev_priv->kernel_context)
406 		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
407 
408 	list_for_each_entry(file, &dev->filelist, lhead) {
409 		struct drm_i915_file_private *fpriv = file->driver_priv;
410 		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
411 	}
412 	mutex_unlock(&dev->struct_mutex);
413 
414 	print_file_stats(m, "[k]contexts", stats);
415 }
416 
417 static int i915_gem_object_info(struct seq_file *m, void *data)
418 {
419 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
420 	struct drm_device *dev = &dev_priv->drm;
421 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
422 	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
423 	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
424 	struct drm_i915_gem_object *obj;
425 	unsigned int page_sizes = 0;
426 	struct drm_file *file;
427 	char buf[80];
428 	int ret;
429 
430 	ret = mutex_lock_interruptible(&dev->struct_mutex);
431 	if (ret)
432 		return ret;
433 
434 	seq_printf(m, "%u objects, %llu bytes\n",
435 		   dev_priv->mm.object_count,
436 		   dev_priv->mm.object_memory);
437 
438 	size = count = 0;
439 	mapped_size = mapped_count = 0;
440 	purgeable_size = purgeable_count = 0;
441 	huge_size = huge_count = 0;
442 
443 	spin_lock(&dev_priv->mm.obj_lock);
444 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
445 		size += obj->base.size;
446 		++count;
447 
448 		if (obj->mm.madv == I915_MADV_DONTNEED) {
449 			purgeable_size += obj->base.size;
450 			++purgeable_count;
451 		}
452 
453 		if (obj->mm.mapping) {
454 			mapped_count++;
455 			mapped_size += obj->base.size;
456 		}
457 
458 		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
459 			huge_count++;
460 			huge_size += obj->base.size;
461 			page_sizes |= obj->mm.page_sizes.sg;
462 		}
463 	}
464 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
465 
466 	size = count = dpy_size = dpy_count = 0;
467 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
468 		size += obj->base.size;
469 		++count;
470 
471 		if (obj->pin_global) {
472 			dpy_size += obj->base.size;
473 			++dpy_count;
474 		}
475 
476 		if (obj->mm.madv == I915_MADV_DONTNEED) {
477 			purgeable_size += obj->base.size;
478 			++purgeable_count;
479 		}
480 
481 		if (obj->mm.mapping) {
482 			mapped_count++;
483 			mapped_size += obj->base.size;
484 		}
485 
486 		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
487 			huge_count++;
488 			huge_size += obj->base.size;
489 			page_sizes |= obj->mm.page_sizes.sg;
490 		}
491 	}
492 	spin_unlock(&dev_priv->mm.obj_lock);
493 
494 	seq_printf(m, "%u bound objects, %llu bytes\n",
495 		   count, size);
496 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
497 		   purgeable_count, purgeable_size);
498 	seq_printf(m, "%u mapped objects, %llu bytes\n",
499 		   mapped_count, mapped_size);
500 	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
501 		   huge_count,
502 		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
503 		   huge_size);
504 	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
505 		   dpy_count, dpy_size);
506 
507 	seq_printf(m, "%llu [%pa] gtt total\n",
508 		   ggtt->base.total, &ggtt->mappable_end);
509 	seq_printf(m, "Supported page sizes: %s\n",
510 		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
511 					buf, sizeof(buf)));
512 
513 	seq_putc(m, '\n');
514 	print_batch_pool_stats(m, dev_priv);
515 	mutex_unlock(&dev->struct_mutex);
516 
517 	mutex_lock(&dev->filelist_mutex);
518 	print_context_stats(m, dev_priv);
519 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
520 		struct file_stats stats;
521 		struct drm_i915_file_private *file_priv = file->driver_priv;
522 		struct i915_request *request;
523 		struct task_struct *task;
524 
525 		mutex_lock(&dev->struct_mutex);
526 
527 		memset(&stats, 0, sizeof(stats));
528 		stats.file_priv = file->driver_priv;
529 		spin_lock(&file->table_lock);
530 		idr_for_each(&file->object_idr, per_file_stats, &stats);
531 		spin_unlock(&file->table_lock);
532 		/*
533 		 * Although we have a valid reference on file->pid, that does
534 		 * not guarantee that the task_struct who called get_pid() is
535 		 * still alive (e.g. get_pid(current) => fork() => exit()).
536 		 * Therefore, we need to protect this ->comm access using RCU.
537 		 */
538 		request = list_first_entry_or_null(&file_priv->mm.request_list,
539 						   struct i915_request,
540 						   client_link);
541 		rcu_read_lock();
542 		task = pid_task(request && request->ctx->pid ?
543 				request->ctx->pid : file->pid,
544 				PIDTYPE_PID);
545 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
546 		rcu_read_unlock();
547 
548 		mutex_unlock(&dev->struct_mutex);
549 	}
550 	mutex_unlock(&dev->filelist_mutex);
551 
552 	return 0;
553 }
554 
555 static int i915_gem_gtt_info(struct seq_file *m, void *data)
556 {
557 	struct drm_info_node *node = m->private;
558 	struct drm_i915_private *dev_priv = node_to_i915(node);
559 	struct drm_device *dev = &dev_priv->drm;
560 	struct drm_i915_gem_object **objects;
561 	struct drm_i915_gem_object *obj;
562 	u64 total_obj_size, total_gtt_size;
563 	unsigned long nobject, n;
564 	int count, ret;
565 
566 	nobject = READ_ONCE(dev_priv->mm.object_count);
567 	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
568 	if (!objects)
569 		return -ENOMEM;
570 
571 	ret = mutex_lock_interruptible(&dev->struct_mutex);
572 	if (ret)
573 		return ret;
574 
575 	count = 0;
576 	spin_lock(&dev_priv->mm.obj_lock);
577 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
578 		objects[count++] = obj;
579 		if (count == nobject)
580 			break;
581 	}
582 	spin_unlock(&dev_priv->mm.obj_lock);
583 
584 	total_obj_size = total_gtt_size = 0;
585 	for (n = 0;  n < count; n++) {
586 		obj = objects[n];
587 
588 		seq_puts(m, "   ");
589 		describe_obj(m, obj);
590 		seq_putc(m, '\n');
591 		total_obj_size += obj->base.size;
592 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
593 	}
594 
595 	mutex_unlock(&dev->struct_mutex);
596 
597 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
598 		   count, total_obj_size, total_gtt_size);
599 	kvfree(objects);
600 
601 	return 0;
602 }
603 
604 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
605 {
606 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
607 	struct drm_device *dev = &dev_priv->drm;
608 	struct drm_i915_gem_object *obj;
609 	struct intel_engine_cs *engine;
610 	enum intel_engine_id id;
611 	int total = 0;
612 	int ret, j;
613 
614 	ret = mutex_lock_interruptible(&dev->struct_mutex);
615 	if (ret)
616 		return ret;
617 
618 	for_each_engine(engine, dev_priv, id) {
619 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
620 			int count;
621 
622 			count = 0;
623 			list_for_each_entry(obj,
624 					    &engine->batch_pool.cache_list[j],
625 					    batch_pool_link)
626 				count++;
627 			seq_printf(m, "%s cache[%d]: %d objects\n",
628 				   engine->name, j, count);
629 
630 			list_for_each_entry(obj,
631 					    &engine->batch_pool.cache_list[j],
632 					    batch_pool_link) {
633 				seq_puts(m, "   ");
634 				describe_obj(m, obj);
635 				seq_putc(m, '\n');
636 			}
637 
638 			total += count;
639 		}
640 	}
641 
642 	seq_printf(m, "total: %d\n", total);
643 
644 	mutex_unlock(&dev->struct_mutex);
645 
646 	return 0;
647 }
648 
649 static void gen8_display_interrupt_info(struct seq_file *m)
650 {
651 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
652 	int pipe;
653 
654 	for_each_pipe(dev_priv, pipe) {
655 		enum intel_display_power_domain power_domain;
656 
657 		power_domain = POWER_DOMAIN_PIPE(pipe);
658 		if (!intel_display_power_get_if_enabled(dev_priv,
659 							power_domain)) {
660 			seq_printf(m, "Pipe %c power disabled\n",
661 				   pipe_name(pipe));
662 			continue;
663 		}
664 		seq_printf(m, "Pipe %c IMR:\t%08x\n",
665 			   pipe_name(pipe),
666 			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
667 		seq_printf(m, "Pipe %c IIR:\t%08x\n",
668 			   pipe_name(pipe),
669 			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
670 		seq_printf(m, "Pipe %c IER:\t%08x\n",
671 			   pipe_name(pipe),
672 			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
673 
674 		intel_display_power_put(dev_priv, power_domain);
675 	}
676 
677 	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
678 		   I915_READ(GEN8_DE_PORT_IMR));
679 	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
680 		   I915_READ(GEN8_DE_PORT_IIR));
681 	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
682 		   I915_READ(GEN8_DE_PORT_IER));
683 
684 	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
685 		   I915_READ(GEN8_DE_MISC_IMR));
686 	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
687 		   I915_READ(GEN8_DE_MISC_IIR));
688 	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
689 		   I915_READ(GEN8_DE_MISC_IER));
690 
691 	seq_printf(m, "PCU interrupt mask:\t%08x\n",
692 		   I915_READ(GEN8_PCU_IMR));
693 	seq_printf(m, "PCU interrupt identity:\t%08x\n",
694 		   I915_READ(GEN8_PCU_IIR));
695 	seq_printf(m, "PCU interrupt enable:\t%08x\n",
696 		   I915_READ(GEN8_PCU_IER));
697 }
698 
699 static int i915_interrupt_info(struct seq_file *m, void *data)
700 {
701 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
702 	struct intel_engine_cs *engine;
703 	enum intel_engine_id id;
704 	int i, pipe;
705 
706 	intel_runtime_pm_get(dev_priv);
707 
708 	if (IS_CHERRYVIEW(dev_priv)) {
709 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
710 			   I915_READ(GEN8_MASTER_IRQ));
711 
712 		seq_printf(m, "Display IER:\t%08x\n",
713 			   I915_READ(VLV_IER));
714 		seq_printf(m, "Display IIR:\t%08x\n",
715 			   I915_READ(VLV_IIR));
716 		seq_printf(m, "Display IIR_RW:\t%08x\n",
717 			   I915_READ(VLV_IIR_RW));
718 		seq_printf(m, "Display IMR:\t%08x\n",
719 			   I915_READ(VLV_IMR));
720 		for_each_pipe(dev_priv, pipe) {
721 			enum intel_display_power_domain power_domain;
722 
723 			power_domain = POWER_DOMAIN_PIPE(pipe);
724 			if (!intel_display_power_get_if_enabled(dev_priv,
725 								power_domain)) {
726 				seq_printf(m, "Pipe %c power disabled\n",
727 					   pipe_name(pipe));
728 				continue;
729 			}
730 
731 			seq_printf(m, "Pipe %c stat:\t%08x\n",
732 				   pipe_name(pipe),
733 				   I915_READ(PIPESTAT(pipe)));
734 
735 			intel_display_power_put(dev_priv, power_domain);
736 		}
737 
738 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
739 		seq_printf(m, "Port hotplug:\t%08x\n",
740 			   I915_READ(PORT_HOTPLUG_EN));
741 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
742 			   I915_READ(VLV_DPFLIPSTAT));
743 		seq_printf(m, "DPINVGTT:\t%08x\n",
744 			   I915_READ(DPINVGTT));
745 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
746 
747 		for (i = 0; i < 4; i++) {
748 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
749 				   i, I915_READ(GEN8_GT_IMR(i)));
750 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
751 				   i, I915_READ(GEN8_GT_IIR(i)));
752 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
753 				   i, I915_READ(GEN8_GT_IER(i)));
754 		}
755 
756 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
757 			   I915_READ(GEN8_PCU_IMR));
758 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
759 			   I915_READ(GEN8_PCU_IIR));
760 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
761 			   I915_READ(GEN8_PCU_IER));
762 	} else if (INTEL_GEN(dev_priv) >= 11) {
763 		seq_printf(m, "Master Interrupt Control:  %08x\n",
764 			   I915_READ(GEN11_GFX_MSTR_IRQ));
765 
766 		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
767 			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
768 		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
769 			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
770 		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
771 			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
772 		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
773 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
774 		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
775 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
776 		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
777 			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
778 
779 		seq_printf(m, "Display Interrupt Control:\t%08x\n",
780 			   I915_READ(GEN11_DISPLAY_INT_CTL));
781 
782 		gen8_display_interrupt_info(m);
783 	} else if (INTEL_GEN(dev_priv) >= 8) {
784 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
785 			   I915_READ(GEN8_MASTER_IRQ));
786 
787 		for (i = 0; i < 4; i++) {
788 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
789 				   i, I915_READ(GEN8_GT_IMR(i)));
790 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
791 				   i, I915_READ(GEN8_GT_IIR(i)));
792 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
793 				   i, I915_READ(GEN8_GT_IER(i)));
794 		}
795 
796 		gen8_display_interrupt_info(m);
797 	} else if (IS_VALLEYVIEW(dev_priv)) {
798 		seq_printf(m, "Display IER:\t%08x\n",
799 			   I915_READ(VLV_IER));
800 		seq_printf(m, "Display IIR:\t%08x\n",
801 			   I915_READ(VLV_IIR));
802 		seq_printf(m, "Display IIR_RW:\t%08x\n",
803 			   I915_READ(VLV_IIR_RW));
804 		seq_printf(m, "Display IMR:\t%08x\n",
805 			   I915_READ(VLV_IMR));
806 		for_each_pipe(dev_priv, pipe) {
807 			enum intel_display_power_domain power_domain;
808 
809 			power_domain = POWER_DOMAIN_PIPE(pipe);
810 			if (!intel_display_power_get_if_enabled(dev_priv,
811 								power_domain)) {
812 				seq_printf(m, "Pipe %c power disabled\n",
813 					   pipe_name(pipe));
814 				continue;
815 			}
816 
817 			seq_printf(m, "Pipe %c stat:\t%08x\n",
818 				   pipe_name(pipe),
819 				   I915_READ(PIPESTAT(pipe)));
820 			intel_display_power_put(dev_priv, power_domain);
821 		}
822 
823 		seq_printf(m, "Master IER:\t%08x\n",
824 			   I915_READ(VLV_MASTER_IER));
825 
826 		seq_printf(m, "Render IER:\t%08x\n",
827 			   I915_READ(GTIER));
828 		seq_printf(m, "Render IIR:\t%08x\n",
829 			   I915_READ(GTIIR));
830 		seq_printf(m, "Render IMR:\t%08x\n",
831 			   I915_READ(GTIMR));
832 
833 		seq_printf(m, "PM IER:\t\t%08x\n",
834 			   I915_READ(GEN6_PMIER));
835 		seq_printf(m, "PM IIR:\t\t%08x\n",
836 			   I915_READ(GEN6_PMIIR));
837 		seq_printf(m, "PM IMR:\t\t%08x\n",
838 			   I915_READ(GEN6_PMIMR));
839 
840 		seq_printf(m, "Port hotplug:\t%08x\n",
841 			   I915_READ(PORT_HOTPLUG_EN));
842 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
843 			   I915_READ(VLV_DPFLIPSTAT));
844 		seq_printf(m, "DPINVGTT:\t%08x\n",
845 			   I915_READ(DPINVGTT));
846 
847 	} else if (!HAS_PCH_SPLIT(dev_priv)) {
848 		seq_printf(m, "Interrupt enable:    %08x\n",
849 			   I915_READ(IER));
850 		seq_printf(m, "Interrupt identity:  %08x\n",
851 			   I915_READ(IIR));
852 		seq_printf(m, "Interrupt mask:      %08x\n",
853 			   I915_READ(IMR));
854 		for_each_pipe(dev_priv, pipe)
855 			seq_printf(m, "Pipe %c stat:         %08x\n",
856 				   pipe_name(pipe),
857 				   I915_READ(PIPESTAT(pipe)));
858 	} else {
859 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
860 			   I915_READ(DEIER));
861 		seq_printf(m, "North Display Interrupt identity:	%08x\n",
862 			   I915_READ(DEIIR));
863 		seq_printf(m, "North Display Interrupt mask:		%08x\n",
864 			   I915_READ(DEIMR));
865 		seq_printf(m, "South Display Interrupt enable:		%08x\n",
866 			   I915_READ(SDEIER));
867 		seq_printf(m, "South Display Interrupt identity:	%08x\n",
868 			   I915_READ(SDEIIR));
869 		seq_printf(m, "South Display Interrupt mask:		%08x\n",
870 			   I915_READ(SDEIMR));
871 		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
872 			   I915_READ(GTIER));
873 		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
874 			   I915_READ(GTIIR));
875 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
876 			   I915_READ(GTIMR));
877 	}
878 
879 	if (INTEL_GEN(dev_priv) >= 11) {
880 		seq_printf(m, "RCS Intr Mask:\t %08x\n",
881 			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
882 		seq_printf(m, "BCS Intr Mask:\t %08x\n",
883 			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
884 		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
885 			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
886 		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
887 			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
888 		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
889 			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
890 		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
891 			   I915_READ(GEN11_GUC_SG_INTR_MASK));
892 		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
893 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
894 		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
895 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
896 		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
897 			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
898 
899 	} else if (INTEL_GEN(dev_priv) >= 6) {
900 		for_each_engine(engine, dev_priv, id) {
901 			seq_printf(m,
902 				   "Graphics Interrupt mask (%s):	%08x\n",
903 				   engine->name, I915_READ_IMR(engine));
904 		}
905 	}
906 
907 	intel_runtime_pm_put(dev_priv);
908 
909 	return 0;
910 }
911 
912 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
913 {
914 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
915 	struct drm_device *dev = &dev_priv->drm;
916 	int i, ret;
917 
918 	ret = mutex_lock_interruptible(&dev->struct_mutex);
919 	if (ret)
920 		return ret;
921 
922 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
923 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
924 		struct i915_vma *vma = dev_priv->fence_regs[i].vma;
925 
926 		seq_printf(m, "Fence %d, pin count = %d, object = ",
927 			   i, dev_priv->fence_regs[i].pin_count);
928 		if (!vma)
929 			seq_puts(m, "unused");
930 		else
931 			describe_obj(m, vma->obj);
932 		seq_putc(m, '\n');
933 	}
934 
935 	mutex_unlock(&dev->struct_mutex);
936 	return 0;
937 }
938 
939 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
940 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
941 			      size_t count, loff_t *pos)
942 {
943 	struct i915_gpu_state *error = file->private_data;
944 	struct drm_i915_error_state_buf str;
945 	ssize_t ret;
946 	loff_t tmp;
947 
948 	if (!error)
949 		return 0;
950 
951 	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
952 	if (ret)
953 		return ret;
954 
955 	ret = i915_error_state_to_str(&str, error);
956 	if (ret)
957 		goto out;
958 
959 	tmp = 0;
960 	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
961 	if (ret < 0)
962 		goto out;
963 
964 	*pos = str.start + ret;
965 out:
966 	i915_error_state_buf_release(&str);
967 	return ret;
968 }
969 
970 static int gpu_state_release(struct inode *inode, struct file *file)
971 {
972 	i915_gpu_state_put(file->private_data);
973 	return 0;
974 }
975 
976 static int i915_gpu_info_open(struct inode *inode, struct file *file)
977 {
978 	struct drm_i915_private *i915 = inode->i_private;
979 	struct i915_gpu_state *gpu;
980 
981 	intel_runtime_pm_get(i915);
982 	gpu = i915_capture_gpu_state(i915);
983 	intel_runtime_pm_put(i915);
984 	if (!gpu)
985 		return -ENOMEM;
986 
987 	file->private_data = gpu;
988 	return 0;
989 }
990 
991 static const struct file_operations i915_gpu_info_fops = {
992 	.owner = THIS_MODULE,
993 	.open = i915_gpu_info_open,
994 	.read = gpu_state_read,
995 	.llseek = default_llseek,
996 	.release = gpu_state_release,
997 };
998 
999 static ssize_t
1000 i915_error_state_write(struct file *filp,
1001 		       const char __user *ubuf,
1002 		       size_t cnt,
1003 		       loff_t *ppos)
1004 {
1005 	struct i915_gpu_state *error = filp->private_data;
1006 
1007 	if (!error)
1008 		return 0;
1009 
1010 	DRM_DEBUG_DRIVER("Resetting error state\n");
1011 	i915_reset_error_state(error->i915);
1012 
1013 	return cnt;
1014 }
1015 
1016 static int i915_error_state_open(struct inode *inode, struct file *file)
1017 {
1018 	file->private_data = i915_first_error_state(inode->i_private);
1019 	return 0;
1020 }
1021 
1022 static const struct file_operations i915_error_state_fops = {
1023 	.owner = THIS_MODULE,
1024 	.open = i915_error_state_open,
1025 	.read = gpu_state_read,
1026 	.write = i915_error_state_write,
1027 	.llseek = default_llseek,
1028 	.release = gpu_state_release,
1029 };
1030 #endif
1031 
1032 static int
1033 i915_next_seqno_set(void *data, u64 val)
1034 {
1035 	struct drm_i915_private *dev_priv = data;
1036 	struct drm_device *dev = &dev_priv->drm;
1037 	int ret;
1038 
1039 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1040 	if (ret)
1041 		return ret;
1042 
1043 	intel_runtime_pm_get(dev_priv);
1044 	ret = i915_gem_set_global_seqno(dev, val);
1045 	intel_runtime_pm_put(dev_priv);
1046 
1047 	mutex_unlock(&dev->struct_mutex);
1048 
1049 	return ret;
1050 }
1051 
1052 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1053 			NULL, i915_next_seqno_set,
1054 			"0x%llx\n");
1055 
1056 static int i915_frequency_info(struct seq_file *m, void *unused)
1057 {
1058 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1059 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1060 	int ret = 0;
1061 
1062 	intel_runtime_pm_get(dev_priv);
1063 
1064 	if (IS_GEN5(dev_priv)) {
1065 		u16 rgvswctl = I915_READ16(MEMSWCTL);
1066 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1067 
1068 		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1069 		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1070 		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1071 			   MEMSTAT_VID_SHIFT);
1072 		seq_printf(m, "Current P-state: %d\n",
1073 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1074 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1075 		u32 rpmodectl, freq_sts;
1076 
1077 		mutex_lock(&dev_priv->pcu_lock);
1078 
1079 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1080 		seq_printf(m, "Video Turbo Mode: %s\n",
1081 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1082 		seq_printf(m, "HW control enabled: %s\n",
1083 			   yesno(rpmodectl & GEN6_RP_ENABLE));
1084 		seq_printf(m, "SW control enabled: %s\n",
1085 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1086 				  GEN6_RP_MEDIA_SW_MODE));
1087 
1088 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1089 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1090 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1091 
1092 		seq_printf(m, "actual GPU freq: %d MHz\n",
1093 			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1094 
1095 		seq_printf(m, "current GPU freq: %d MHz\n",
1096 			   intel_gpu_freq(dev_priv, rps->cur_freq));
1097 
1098 		seq_printf(m, "max GPU freq: %d MHz\n",
1099 			   intel_gpu_freq(dev_priv, rps->max_freq));
1100 
1101 		seq_printf(m, "min GPU freq: %d MHz\n",
1102 			   intel_gpu_freq(dev_priv, rps->min_freq));
1103 
1104 		seq_printf(m, "idle GPU freq: %d MHz\n",
1105 			   intel_gpu_freq(dev_priv, rps->idle_freq));
1106 
1107 		seq_printf(m,
1108 			   "efficient (RPe) frequency: %d MHz\n",
1109 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1110 		mutex_unlock(&dev_priv->pcu_lock);
1111 	} else if (INTEL_GEN(dev_priv) >= 6) {
1112 		u32 rp_state_limits;
1113 		u32 gt_perf_status;
1114 		u32 rp_state_cap;
1115 		u32 rpmodectl, rpinclimit, rpdeclimit;
1116 		u32 rpstat, cagf, reqf;
1117 		u32 rpupei, rpcurup, rpprevup;
1118 		u32 rpdownei, rpcurdown, rpprevdown;
1119 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1120 		int max_freq;
1121 
1122 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1123 		if (IS_GEN9_LP(dev_priv)) {
1124 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1125 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1126 		} else {
1127 			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1128 			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1129 		}
1130 
1131 		/* RPSTAT1 is in the GT power well */
1132 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1133 
1134 		reqf = I915_READ(GEN6_RPNSWREQ);
1135 		if (INTEL_GEN(dev_priv) >= 9)
1136 			reqf >>= 23;
1137 		else {
1138 			reqf &= ~GEN6_TURBO_DISABLE;
1139 			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1140 				reqf >>= 24;
1141 			else
1142 				reqf >>= 25;
1143 		}
1144 		reqf = intel_gpu_freq(dev_priv, reqf);
1145 
1146 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1147 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1148 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1149 
1150 		rpstat = I915_READ(GEN6_RPSTAT1);
1151 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1152 		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1153 		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1154 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1155 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1156 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
1157 		cagf = intel_gpu_freq(dev_priv,
1158 				      intel_get_cagf(dev_priv, rpstat));
1159 
1160 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1161 
1162 		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
1163 			pm_ier = I915_READ(GEN6_PMIER);
1164 			pm_imr = I915_READ(GEN6_PMIMR);
1165 			pm_isr = I915_READ(GEN6_PMISR);
1166 			pm_iir = I915_READ(GEN6_PMIIR);
1167 			pm_mask = I915_READ(GEN6_PMINTRMSK);
1168 		} else {
1169 			pm_ier = I915_READ(GEN8_GT_IER(2));
1170 			pm_imr = I915_READ(GEN8_GT_IMR(2));
1171 			pm_isr = I915_READ(GEN8_GT_ISR(2));
1172 			pm_iir = I915_READ(GEN8_GT_IIR(2));
1173 			pm_mask = I915_READ(GEN6_PMINTRMSK);
1174 		}
1175 		seq_printf(m, "Video Turbo Mode: %s\n",
1176 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1177 		seq_printf(m, "HW control enabled: %s\n",
1178 			   yesno(rpmodectl & GEN6_RP_ENABLE));
1179 		seq_printf(m, "SW control enabled: %s\n",
1180 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1181 				  GEN6_RP_MEDIA_SW_MODE));
1182 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1183 			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1184 		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
1185 			   rps->pm_intrmsk_mbz);
1186 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1187 		seq_printf(m, "Render p-state ratio: %d\n",
1188 			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
1189 		seq_printf(m, "Render p-state VID: %d\n",
1190 			   gt_perf_status & 0xff);
1191 		seq_printf(m, "Render p-state limit: %d\n",
1192 			   rp_state_limits & 0xff);
1193 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1194 		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1195 		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1196 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1197 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1198 		seq_printf(m, "CAGF: %dMHz\n", cagf);
1199 		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1200 			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1201 		seq_printf(m, "RP CUR UP: %d (%dus)\n",
1202 			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1203 		seq_printf(m, "RP PREV UP: %d (%dus)\n",
1204 			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
1205 		seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold);
1206 
1207 		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1208 			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1209 		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1210 			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1211 		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1212 			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
1213 		seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold);
1214 
1215 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
1216 			    rp_state_cap >> 16) & 0xff;
1217 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1218 			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
1219 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1220 			   intel_gpu_freq(dev_priv, max_freq));
1221 
1222 		max_freq = (rp_state_cap & 0xff00) >> 8;
1223 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1224 			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
1225 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1226 			   intel_gpu_freq(dev_priv, max_freq));
1227 
1228 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
1229 			    rp_state_cap >> 0) & 0xff;
1230 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1231 			     IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1);
1232 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1233 			   intel_gpu_freq(dev_priv, max_freq));
1234 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1235 			   intel_gpu_freq(dev_priv, rps->max_freq));
1236 
1237 		seq_printf(m, "Current freq: %d MHz\n",
1238 			   intel_gpu_freq(dev_priv, rps->cur_freq));
1239 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
1240 		seq_printf(m, "Idle freq: %d MHz\n",
1241 			   intel_gpu_freq(dev_priv, rps->idle_freq));
1242 		seq_printf(m, "Min freq: %d MHz\n",
1243 			   intel_gpu_freq(dev_priv, rps->min_freq));
1244 		seq_printf(m, "Boost freq: %d MHz\n",
1245 			   intel_gpu_freq(dev_priv, rps->boost_freq));
1246 		seq_printf(m, "Max freq: %d MHz\n",
1247 			   intel_gpu_freq(dev_priv, rps->max_freq));
1248 		seq_printf(m,
1249 			   "efficient (RPe) frequency: %d MHz\n",
1250 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1251 	} else {
1252 		seq_puts(m, "no P-state info available\n");
1253 	}
1254 
1255 	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1256 	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1257 	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1258 
1259 	intel_runtime_pm_put(dev_priv);
1260 	return ret;
1261 }
1262 
1263 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1264 			       struct seq_file *m,
1265 			       struct intel_instdone *instdone)
1266 {
1267 	int slice;
1268 	int subslice;
1269 
1270 	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1271 		   instdone->instdone);
1272 
1273 	if (INTEL_GEN(dev_priv) <= 3)
1274 		return;
1275 
1276 	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1277 		   instdone->slice_common);
1278 
1279 	if (INTEL_GEN(dev_priv) <= 6)
1280 		return;
1281 
1282 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1283 		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1284 			   slice, subslice, instdone->sampler[slice][subslice]);
1285 
1286 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1287 		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1288 			   slice, subslice, instdone->row[slice][subslice]);
1289 }
1290 
1291 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1292 {
1293 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1294 	struct intel_engine_cs *engine;
1295 	u64 acthd[I915_NUM_ENGINES];
1296 	u32 seqno[I915_NUM_ENGINES];
1297 	struct intel_instdone instdone;
1298 	enum intel_engine_id id;
1299 
1300 	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1301 		seq_puts(m, "Wedged\n");
1302 	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1303 		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1304 	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
1305 		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
1306 	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
1307 		seq_puts(m, "Waiter holding struct mutex\n");
1308 	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
1309 		seq_puts(m, "struct_mutex blocked for reset\n");
1310 
1311 	if (!i915_modparams.enable_hangcheck) {
1312 		seq_puts(m, "Hangcheck disabled\n");
1313 		return 0;
1314 	}
1315 
1316 	intel_runtime_pm_get(dev_priv);
1317 
1318 	for_each_engine(engine, dev_priv, id) {
1319 		acthd[id] = intel_engine_get_active_head(engine);
1320 		seqno[id] = intel_engine_get_seqno(engine);
1321 	}
1322 
1323 	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
1324 
1325 	intel_runtime_pm_put(dev_priv);
1326 
1327 	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1328 		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1329 			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1330 					    jiffies));
1331 	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1332 		seq_puts(m, "Hangcheck active, work pending\n");
1333 	else
1334 		seq_puts(m, "Hangcheck inactive\n");
1335 
1336 	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1337 
1338 	for_each_engine(engine, dev_priv, id) {
1339 		struct intel_breadcrumbs *b = &engine->breadcrumbs;
1340 		struct rb_node *rb;
1341 
1342 		seq_printf(m, "%s:\n", engine->name);
1343 		seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
1344 			   engine->hangcheck.seqno, seqno[id],
1345 			   intel_engine_last_submit(engine),
1346 			   engine->timeline->inflight_seqnos);
1347 		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
1348 			   yesno(intel_engine_has_waiter(engine)),
1349 			   yesno(test_bit(engine->id,
1350 					  &dev_priv->gpu_error.missed_irq_rings)),
1351 			   yesno(engine->hangcheck.stalled));
1352 
1353 		spin_lock_irq(&b->rb_lock);
1354 		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
1355 			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
1356 
1357 			seq_printf(m, "\t%s [%d] waiting for %x\n",
1358 				   w->tsk->comm, w->tsk->pid, w->seqno);
1359 		}
1360 		spin_unlock_irq(&b->rb_lock);
1361 
1362 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1363 			   (long long)engine->hangcheck.acthd,
1364 			   (long long)acthd[id]);
1365 		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1366 			   hangcheck_action_to_str(engine->hangcheck.action),
1367 			   engine->hangcheck.action,
1368 			   jiffies_to_msecs(jiffies -
1369 					    engine->hangcheck.action_timestamp));
1370 
1371 		if (engine->id == RCS) {
1372 			seq_puts(m, "\tinstdone read =\n");
1373 
1374 			i915_instdone_info(dev_priv, m, &instdone);
1375 
1376 			seq_puts(m, "\tinstdone accu =\n");
1377 
1378 			i915_instdone_info(dev_priv, m,
1379 					   &engine->hangcheck.instdone);
1380 		}
1381 	}
1382 
1383 	return 0;
1384 }
1385 
1386 static int i915_reset_info(struct seq_file *m, void *unused)
1387 {
1388 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1389 	struct i915_gpu_error *error = &dev_priv->gpu_error;
1390 	struct intel_engine_cs *engine;
1391 	enum intel_engine_id id;
1392 
1393 	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1394 
1395 	for_each_engine(engine, dev_priv, id) {
1396 		seq_printf(m, "%s = %u\n", engine->name,
1397 			   i915_reset_engine_count(error, engine));
1398 	}
1399 
1400 	return 0;
1401 }
1402 
1403 static int ironlake_drpc_info(struct seq_file *m)
1404 {
1405 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1406 	u32 rgvmodectl, rstdbyctl;
1407 	u16 crstandvid;
1408 
1409 	rgvmodectl = I915_READ(MEMMODECTL);
1410 	rstdbyctl = I915_READ(RSTDBYCTL);
1411 	crstandvid = I915_READ16(CRSTANDVID);
1412 
1413 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1414 	seq_printf(m, "Boost freq: %d\n",
1415 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1416 		   MEMMODE_BOOST_FREQ_SHIFT);
1417 	seq_printf(m, "HW control enabled: %s\n",
1418 		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1419 	seq_printf(m, "SW control enabled: %s\n",
1420 		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1421 	seq_printf(m, "Gated voltage change: %s\n",
1422 		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1423 	seq_printf(m, "Starting frequency: P%d\n",
1424 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1425 	seq_printf(m, "Max P-state: P%d\n",
1426 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1427 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1428 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1429 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1430 	seq_printf(m, "Render standby enabled: %s\n",
1431 		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1432 	seq_puts(m, "Current RS state: ");
1433 	switch (rstdbyctl & RSX_STATUS_MASK) {
1434 	case RSX_STATUS_ON:
1435 		seq_puts(m, "on\n");
1436 		break;
1437 	case RSX_STATUS_RC1:
1438 		seq_puts(m, "RC1\n");
1439 		break;
1440 	case RSX_STATUS_RC1E:
1441 		seq_puts(m, "RC1E\n");
1442 		break;
1443 	case RSX_STATUS_RS1:
1444 		seq_puts(m, "RS1\n");
1445 		break;
1446 	case RSX_STATUS_RS2:
1447 		seq_puts(m, "RS2 (RC6)\n");
1448 		break;
1449 	case RSX_STATUS_RS3:
1450 		seq_puts(m, "RC3 (RC6+)\n");
1451 		break;
1452 	default:
1453 		seq_puts(m, "unknown\n");
1454 		break;
1455 	}
1456 
1457 	return 0;
1458 }
1459 
1460 static int i915_forcewake_domains(struct seq_file *m, void *data)
1461 {
1462 	struct drm_i915_private *i915 = node_to_i915(m->private);
1463 	struct intel_uncore_forcewake_domain *fw_domain;
1464 	unsigned int tmp;
1465 
1466 	seq_printf(m, "user.bypass_count = %u\n",
1467 		   i915->uncore.user_forcewake.count);
1468 
1469 	for_each_fw_domain(fw_domain, i915, tmp)
1470 		seq_printf(m, "%s.wake_count = %u\n",
1471 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
1472 			   READ_ONCE(fw_domain->wake_count));
1473 
1474 	return 0;
1475 }
1476 
1477 static void print_rc6_res(struct seq_file *m,
1478 			  const char *title,
1479 			  const i915_reg_t reg)
1480 {
1481 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1482 
1483 	seq_printf(m, "%s %u (%llu us)\n",
1484 		   title, I915_READ(reg),
1485 		   intel_rc6_residency_us(dev_priv, reg));
1486 }
1487 
1488 static int vlv_drpc_info(struct seq_file *m)
1489 {
1490 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1491 	u32 rcctl1, pw_status;
1492 
1493 	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1494 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1495 
1496 	seq_printf(m, "RC6 Enabled: %s\n",
1497 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1498 					GEN6_RC_CTL_EI_MODE(1))));
1499 	seq_printf(m, "Render Power Well: %s\n",
1500 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1501 	seq_printf(m, "Media Power Well: %s\n",
1502 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1503 
1504 	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1505 	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1506 
1507 	return i915_forcewake_domains(m, NULL);
1508 }
1509 
1510 static int gen6_drpc_info(struct seq_file *m)
1511 {
1512 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1513 	u32 gt_core_status, rcctl1, rc6vids = 0;
1514 	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1515 
1516 	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1517 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1518 
1519 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1520 	if (INTEL_GEN(dev_priv) >= 9) {
1521 		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1522 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1523 	}
1524 
1525 	if (INTEL_GEN(dev_priv) <= 7) {
1526 		mutex_lock(&dev_priv->pcu_lock);
1527 		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1528 				       &rc6vids);
1529 		mutex_unlock(&dev_priv->pcu_lock);
1530 	}
1531 
1532 	seq_printf(m, "RC1e Enabled: %s\n",
1533 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1534 	seq_printf(m, "RC6 Enabled: %s\n",
1535 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1536 	if (INTEL_GEN(dev_priv) >= 9) {
1537 		seq_printf(m, "Render Well Gating Enabled: %s\n",
1538 			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1539 		seq_printf(m, "Media Well Gating Enabled: %s\n",
1540 			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1541 	}
1542 	seq_printf(m, "Deep RC6 Enabled: %s\n",
1543 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1544 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1545 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1546 	seq_puts(m, "Current RC state: ");
1547 	switch (gt_core_status & GEN6_RCn_MASK) {
1548 	case GEN6_RC0:
1549 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1550 			seq_puts(m, "Core Power Down\n");
1551 		else
1552 			seq_puts(m, "on\n");
1553 		break;
1554 	case GEN6_RC3:
1555 		seq_puts(m, "RC3\n");
1556 		break;
1557 	case GEN6_RC6:
1558 		seq_puts(m, "RC6\n");
1559 		break;
1560 	case GEN6_RC7:
1561 		seq_puts(m, "RC7\n");
1562 		break;
1563 	default:
1564 		seq_puts(m, "Unknown\n");
1565 		break;
1566 	}
1567 
1568 	seq_printf(m, "Core Power Down: %s\n",
1569 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1570 	if (INTEL_GEN(dev_priv) >= 9) {
1571 		seq_printf(m, "Render Power Well: %s\n",
1572 			(gen9_powergate_status &
1573 			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1574 		seq_printf(m, "Media Power Well: %s\n",
1575 			(gen9_powergate_status &
1576 			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1577 	}
1578 
1579 	/* Not exactly sure what this is */
1580 	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1581 		      GEN6_GT_GFX_RC6_LOCKED);
1582 	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1583 	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1584 	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1585 
1586 	if (INTEL_GEN(dev_priv) <= 7) {
1587 		seq_printf(m, "RC6   voltage: %dmV\n",
1588 			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1589 		seq_printf(m, "RC6+  voltage: %dmV\n",
1590 			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1591 		seq_printf(m, "RC6++ voltage: %dmV\n",
1592 			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1593 	}
1594 
1595 	return i915_forcewake_domains(m, NULL);
1596 }
1597 
1598 static int i915_drpc_info(struct seq_file *m, void *unused)
1599 {
1600 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1601 	int err;
1602 
1603 	intel_runtime_pm_get(dev_priv);
1604 
1605 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1606 		err = vlv_drpc_info(m);
1607 	else if (INTEL_GEN(dev_priv) >= 6)
1608 		err = gen6_drpc_info(m);
1609 	else
1610 		err = ironlake_drpc_info(m);
1611 
1612 	intel_runtime_pm_put(dev_priv);
1613 
1614 	return err;
1615 }
1616 
1617 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1618 {
1619 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1620 
1621 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1622 		   dev_priv->fb_tracking.busy_bits);
1623 
1624 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1625 		   dev_priv->fb_tracking.flip_bits);
1626 
1627 	return 0;
1628 }
1629 
1630 static int i915_fbc_status(struct seq_file *m, void *unused)
1631 {
1632 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1633 	struct intel_fbc *fbc = &dev_priv->fbc;
1634 
1635 	if (!HAS_FBC(dev_priv))
1636 		return -ENODEV;
1637 
1638 	intel_runtime_pm_get(dev_priv);
1639 	mutex_lock(&fbc->lock);
1640 
1641 	if (intel_fbc_is_active(dev_priv))
1642 		seq_puts(m, "FBC enabled\n");
1643 	else
1644 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1645 
1646 	if (fbc->work.scheduled)
1647 		seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n",
1648 			   fbc->work.scheduled_vblank,
1649 			   drm_crtc_vblank_count(&fbc->crtc->base));
1650 
1651 	if (intel_fbc_is_active(dev_priv)) {
1652 		u32 mask;
1653 
1654 		if (INTEL_GEN(dev_priv) >= 8)
1655 			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1656 		else if (INTEL_GEN(dev_priv) >= 7)
1657 			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1658 		else if (INTEL_GEN(dev_priv) >= 5)
1659 			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1660 		else if (IS_G4X(dev_priv))
1661 			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1662 		else
1663 			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1664 							FBC_STAT_COMPRESSED);
1665 
1666 		seq_printf(m, "Compressing: %s\n", yesno(mask));
1667 	}
1668 
1669 	mutex_unlock(&fbc->lock);
1670 	intel_runtime_pm_put(dev_priv);
1671 
1672 	return 0;
1673 }
1674 
1675 static int i915_fbc_false_color_get(void *data, u64 *val)
1676 {
1677 	struct drm_i915_private *dev_priv = data;
1678 
1679 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1680 		return -ENODEV;
1681 
1682 	*val = dev_priv->fbc.false_color;
1683 
1684 	return 0;
1685 }
1686 
1687 static int i915_fbc_false_color_set(void *data, u64 val)
1688 {
1689 	struct drm_i915_private *dev_priv = data;
1690 	u32 reg;
1691 
1692 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1693 		return -ENODEV;
1694 
1695 	mutex_lock(&dev_priv->fbc.lock);
1696 
1697 	reg = I915_READ(ILK_DPFC_CONTROL);
1698 	dev_priv->fbc.false_color = val;
1699 
1700 	I915_WRITE(ILK_DPFC_CONTROL, val ?
1701 		   (reg | FBC_CTL_FALSE_COLOR) :
1702 		   (reg & ~FBC_CTL_FALSE_COLOR));
1703 
1704 	mutex_unlock(&dev_priv->fbc.lock);
1705 	return 0;
1706 }
1707 
1708 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1709 			i915_fbc_false_color_get, i915_fbc_false_color_set,
1710 			"%llu\n");
1711 
1712 static int i915_ips_status(struct seq_file *m, void *unused)
1713 {
1714 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1715 
1716 	if (!HAS_IPS(dev_priv))
1717 		return -ENODEV;
1718 
1719 	intel_runtime_pm_get(dev_priv);
1720 
1721 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1722 		   yesno(i915_modparams.enable_ips));
1723 
1724 	if (INTEL_GEN(dev_priv) >= 8) {
1725 		seq_puts(m, "Currently: unknown\n");
1726 	} else {
1727 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1728 			seq_puts(m, "Currently: enabled\n");
1729 		else
1730 			seq_puts(m, "Currently: disabled\n");
1731 	}
1732 
1733 	intel_runtime_pm_put(dev_priv);
1734 
1735 	return 0;
1736 }
1737 
1738 static int i915_sr_status(struct seq_file *m, void *unused)
1739 {
1740 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1741 	bool sr_enabled = false;
1742 
1743 	intel_runtime_pm_get(dev_priv);
1744 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1745 
1746 	if (INTEL_GEN(dev_priv) >= 9)
1747 		/* no global SR status; inspect per-plane WM */;
1748 	else if (HAS_PCH_SPLIT(dev_priv))
1749 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1750 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1751 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1752 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1753 	else if (IS_I915GM(dev_priv))
1754 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1755 	else if (IS_PINEVIEW(dev_priv))
1756 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1757 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1758 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1759 
1760 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1761 	intel_runtime_pm_put(dev_priv);
1762 
1763 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1764 
1765 	return 0;
1766 }
1767 
1768 static int i915_emon_status(struct seq_file *m, void *unused)
1769 {
1770 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1771 	struct drm_device *dev = &dev_priv->drm;
1772 	unsigned long temp, chipset, gfx;
1773 	int ret;
1774 
1775 	if (!IS_GEN5(dev_priv))
1776 		return -ENODEV;
1777 
1778 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1779 	if (ret)
1780 		return ret;
1781 
1782 	temp = i915_mch_val(dev_priv);
1783 	chipset = i915_chipset_val(dev_priv);
1784 	gfx = i915_gfx_val(dev_priv);
1785 	mutex_unlock(&dev->struct_mutex);
1786 
1787 	seq_printf(m, "GMCH temp: %ld\n", temp);
1788 	seq_printf(m, "Chipset power: %ld\n", chipset);
1789 	seq_printf(m, "GFX power: %ld\n", gfx);
1790 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1791 
1792 	return 0;
1793 }
1794 
1795 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1796 {
1797 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1798 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1799 	int ret = 0;
1800 	int gpu_freq, ia_freq;
1801 	unsigned int max_gpu_freq, min_gpu_freq;
1802 
1803 	if (!HAS_LLC(dev_priv))
1804 		return -ENODEV;
1805 
1806 	intel_runtime_pm_get(dev_priv);
1807 
1808 	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
1809 	if (ret)
1810 		goto out;
1811 
1812 	if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
1813 		/* Convert GT frequency to 50 HZ units */
1814 		min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER;
1815 		max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER;
1816 	} else {
1817 		min_gpu_freq = rps->min_freq_softlimit;
1818 		max_gpu_freq = rps->max_freq_softlimit;
1819 	}
1820 
1821 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1822 
1823 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1824 		ia_freq = gpu_freq;
1825 		sandybridge_pcode_read(dev_priv,
1826 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1827 				       &ia_freq);
1828 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1829 			   intel_gpu_freq(dev_priv, (gpu_freq *
1830 						     (IS_GEN9_BC(dev_priv) ||
1831 						      IS_CANNONLAKE(dev_priv) ?
1832 						      GEN9_FREQ_SCALER : 1))),
1833 			   ((ia_freq >> 0) & 0xff) * 100,
1834 			   ((ia_freq >> 8) & 0xff) * 100);
1835 	}
1836 
1837 	mutex_unlock(&dev_priv->pcu_lock);
1838 
1839 out:
1840 	intel_runtime_pm_put(dev_priv);
1841 	return ret;
1842 }
1843 
1844 static int i915_opregion(struct seq_file *m, void *unused)
1845 {
1846 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1847 	struct drm_device *dev = &dev_priv->drm;
1848 	struct intel_opregion *opregion = &dev_priv->opregion;
1849 	int ret;
1850 
1851 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1852 	if (ret)
1853 		goto out;
1854 
1855 	if (opregion->header)
1856 		seq_write(m, opregion->header, OPREGION_SIZE);
1857 
1858 	mutex_unlock(&dev->struct_mutex);
1859 
1860 out:
1861 	return 0;
1862 }
1863 
1864 static int i915_vbt(struct seq_file *m, void *unused)
1865 {
1866 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1867 
1868 	if (opregion->vbt)
1869 		seq_write(m, opregion->vbt, opregion->vbt_size);
1870 
1871 	return 0;
1872 }
1873 
1874 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1875 {
1876 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1877 	struct drm_device *dev = &dev_priv->drm;
1878 	struct intel_framebuffer *fbdev_fb = NULL;
1879 	struct drm_framebuffer *drm_fb;
1880 	int ret;
1881 
1882 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1883 	if (ret)
1884 		return ret;
1885 
1886 #ifdef CONFIG_DRM_FBDEV_EMULATION
1887 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1888 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1889 
1890 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1891 			   fbdev_fb->base.width,
1892 			   fbdev_fb->base.height,
1893 			   fbdev_fb->base.format->depth,
1894 			   fbdev_fb->base.format->cpp[0] * 8,
1895 			   fbdev_fb->base.modifier,
1896 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1897 		describe_obj(m, fbdev_fb->obj);
1898 		seq_putc(m, '\n');
1899 	}
1900 #endif
1901 
1902 	mutex_lock(&dev->mode_config.fb_lock);
1903 	drm_for_each_fb(drm_fb, dev) {
1904 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1905 		if (fb == fbdev_fb)
1906 			continue;
1907 
1908 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1909 			   fb->base.width,
1910 			   fb->base.height,
1911 			   fb->base.format->depth,
1912 			   fb->base.format->cpp[0] * 8,
1913 			   fb->base.modifier,
1914 			   drm_framebuffer_read_refcount(&fb->base));
1915 		describe_obj(m, fb->obj);
1916 		seq_putc(m, '\n');
1917 	}
1918 	mutex_unlock(&dev->mode_config.fb_lock);
1919 	mutex_unlock(&dev->struct_mutex);
1920 
1921 	return 0;
1922 }
1923 
1924 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1925 {
1926 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
1927 		   ring->space, ring->head, ring->tail);
1928 }
1929 
1930 static int i915_context_status(struct seq_file *m, void *unused)
1931 {
1932 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1933 	struct drm_device *dev = &dev_priv->drm;
1934 	struct intel_engine_cs *engine;
1935 	struct i915_gem_context *ctx;
1936 	enum intel_engine_id id;
1937 	int ret;
1938 
1939 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1940 	if (ret)
1941 		return ret;
1942 
1943 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1944 		seq_printf(m, "HW context %u ", ctx->hw_id);
1945 		if (ctx->pid) {
1946 			struct task_struct *task;
1947 
1948 			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1949 			if (task) {
1950 				seq_printf(m, "(%s [%d]) ",
1951 					   task->comm, task->pid);
1952 				put_task_struct(task);
1953 			}
1954 		} else if (IS_ERR(ctx->file_priv)) {
1955 			seq_puts(m, "(deleted) ");
1956 		} else {
1957 			seq_puts(m, "(kernel) ");
1958 		}
1959 
1960 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1961 		seq_putc(m, '\n');
1962 
1963 		for_each_engine(engine, dev_priv, id) {
1964 			struct intel_context *ce = &ctx->engine[engine->id];
1965 
1966 			seq_printf(m, "%s: ", engine->name);
1967 			if (ce->state)
1968 				describe_obj(m, ce->state->obj);
1969 			if (ce->ring)
1970 				describe_ctx_ring(m, ce->ring);
1971 			seq_putc(m, '\n');
1972 		}
1973 
1974 		seq_putc(m, '\n');
1975 	}
1976 
1977 	mutex_unlock(&dev->struct_mutex);
1978 
1979 	return 0;
1980 }
1981 
1982 static const char *swizzle_string(unsigned swizzle)
1983 {
1984 	switch (swizzle) {
1985 	case I915_BIT_6_SWIZZLE_NONE:
1986 		return "none";
1987 	case I915_BIT_6_SWIZZLE_9:
1988 		return "bit9";
1989 	case I915_BIT_6_SWIZZLE_9_10:
1990 		return "bit9/bit10";
1991 	case I915_BIT_6_SWIZZLE_9_11:
1992 		return "bit9/bit11";
1993 	case I915_BIT_6_SWIZZLE_9_10_11:
1994 		return "bit9/bit10/bit11";
1995 	case I915_BIT_6_SWIZZLE_9_17:
1996 		return "bit9/bit17";
1997 	case I915_BIT_6_SWIZZLE_9_10_17:
1998 		return "bit9/bit10/bit17";
1999 	case I915_BIT_6_SWIZZLE_UNKNOWN:
2000 		return "unknown";
2001 	}
2002 
2003 	return "bug";
2004 }
2005 
2006 static int i915_swizzle_info(struct seq_file *m, void *data)
2007 {
2008 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2009 
2010 	intel_runtime_pm_get(dev_priv);
2011 
2012 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2013 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2014 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2015 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2016 
2017 	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
2018 		seq_printf(m, "DDC = 0x%08x\n",
2019 			   I915_READ(DCC));
2020 		seq_printf(m, "DDC2 = 0x%08x\n",
2021 			   I915_READ(DCC2));
2022 		seq_printf(m, "C0DRB3 = 0x%04x\n",
2023 			   I915_READ16(C0DRB3));
2024 		seq_printf(m, "C1DRB3 = 0x%04x\n",
2025 			   I915_READ16(C1DRB3));
2026 	} else if (INTEL_GEN(dev_priv) >= 6) {
2027 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2028 			   I915_READ(MAD_DIMM_C0));
2029 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2030 			   I915_READ(MAD_DIMM_C1));
2031 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2032 			   I915_READ(MAD_DIMM_C2));
2033 		seq_printf(m, "TILECTL = 0x%08x\n",
2034 			   I915_READ(TILECTL));
2035 		if (INTEL_GEN(dev_priv) >= 8)
2036 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2037 				   I915_READ(GAMTARBMODE));
2038 		else
2039 			seq_printf(m, "ARB_MODE = 0x%08x\n",
2040 				   I915_READ(ARB_MODE));
2041 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2042 			   I915_READ(DISP_ARB_CTL));
2043 	}
2044 
2045 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2046 		seq_puts(m, "L-shaped memory detected\n");
2047 
2048 	intel_runtime_pm_put(dev_priv);
2049 
2050 	return 0;
2051 }
2052 
2053 static int per_file_ctx(int id, void *ptr, void *data)
2054 {
2055 	struct i915_gem_context *ctx = ptr;
2056 	struct seq_file *m = data;
2057 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2058 
2059 	if (!ppgtt) {
2060 		seq_printf(m, "  no ppgtt for context %d\n",
2061 			   ctx->user_handle);
2062 		return 0;
2063 	}
2064 
2065 	if (i915_gem_context_is_default(ctx))
2066 		seq_puts(m, "  default context:\n");
2067 	else
2068 		seq_printf(m, "  context %d:\n", ctx->user_handle);
2069 	ppgtt->debug_dump(ppgtt, m);
2070 
2071 	return 0;
2072 }
2073 
2074 static void gen8_ppgtt_info(struct seq_file *m,
2075 			    struct drm_i915_private *dev_priv)
2076 {
2077 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2078 	struct intel_engine_cs *engine;
2079 	enum intel_engine_id id;
2080 	int i;
2081 
2082 	if (!ppgtt)
2083 		return;
2084 
2085 	for_each_engine(engine, dev_priv, id) {
2086 		seq_printf(m, "%s\n", engine->name);
2087 		for (i = 0; i < 4; i++) {
2088 			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2089 			pdp <<= 32;
2090 			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2091 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2092 		}
2093 	}
2094 }
2095 
2096 static void gen6_ppgtt_info(struct seq_file *m,
2097 			    struct drm_i915_private *dev_priv)
2098 {
2099 	struct intel_engine_cs *engine;
2100 	enum intel_engine_id id;
2101 
2102 	if (IS_GEN6(dev_priv))
2103 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2104 
2105 	for_each_engine(engine, dev_priv, id) {
2106 		seq_printf(m, "%s\n", engine->name);
2107 		if (IS_GEN7(dev_priv))
2108 			seq_printf(m, "GFX_MODE: 0x%08x\n",
2109 				   I915_READ(RING_MODE_GEN7(engine)));
2110 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2111 			   I915_READ(RING_PP_DIR_BASE(engine)));
2112 		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2113 			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
2114 		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2115 			   I915_READ(RING_PP_DIR_DCLV(engine)));
2116 	}
2117 	if (dev_priv->mm.aliasing_ppgtt) {
2118 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2119 
2120 		seq_puts(m, "aliasing PPGTT:\n");
2121 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2122 
2123 		ppgtt->debug_dump(ppgtt, m);
2124 	}
2125 
2126 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2127 }
2128 
2129 static int i915_ppgtt_info(struct seq_file *m, void *data)
2130 {
2131 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2132 	struct drm_device *dev = &dev_priv->drm;
2133 	struct drm_file *file;
2134 	int ret;
2135 
2136 	mutex_lock(&dev->filelist_mutex);
2137 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2138 	if (ret)
2139 		goto out_unlock;
2140 
2141 	intel_runtime_pm_get(dev_priv);
2142 
2143 	if (INTEL_GEN(dev_priv) >= 8)
2144 		gen8_ppgtt_info(m, dev_priv);
2145 	else if (INTEL_GEN(dev_priv) >= 6)
2146 		gen6_ppgtt_info(m, dev_priv);
2147 
2148 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2149 		struct drm_i915_file_private *file_priv = file->driver_priv;
2150 		struct task_struct *task;
2151 
2152 		task = get_pid_task(file->pid, PIDTYPE_PID);
2153 		if (!task) {
2154 			ret = -ESRCH;
2155 			goto out_rpm;
2156 		}
2157 		seq_printf(m, "\nproc: %s\n", task->comm);
2158 		put_task_struct(task);
2159 		idr_for_each(&file_priv->context_idr, per_file_ctx,
2160 			     (void *)(unsigned long)m);
2161 	}
2162 
2163 out_rpm:
2164 	intel_runtime_pm_put(dev_priv);
2165 	mutex_unlock(&dev->struct_mutex);
2166 out_unlock:
2167 	mutex_unlock(&dev->filelist_mutex);
2168 	return ret;
2169 }
2170 
2171 static int count_irq_waiters(struct drm_i915_private *i915)
2172 {
2173 	struct intel_engine_cs *engine;
2174 	enum intel_engine_id id;
2175 	int count = 0;
2176 
2177 	for_each_engine(engine, i915, id)
2178 		count += intel_engine_has_waiter(engine);
2179 
2180 	return count;
2181 }
2182 
2183 static const char *rps_power_to_str(unsigned int power)
2184 {
2185 	static const char * const strings[] = {
2186 		[LOW_POWER] = "low power",
2187 		[BETWEEN] = "mixed",
2188 		[HIGH_POWER] = "high power",
2189 	};
2190 
2191 	if (power >= ARRAY_SIZE(strings) || !strings[power])
2192 		return "unknown";
2193 
2194 	return strings[power];
2195 }
2196 
2197 static int i915_rps_boost_info(struct seq_file *m, void *data)
2198 {
2199 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2200 	struct drm_device *dev = &dev_priv->drm;
2201 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
2202 	struct drm_file *file;
2203 
2204 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2205 	seq_printf(m, "GPU busy? %s [%d requests]\n",
2206 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2207 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2208 	seq_printf(m, "Boosts outstanding? %d\n",
2209 		   atomic_read(&rps->num_waiters));
2210 	seq_printf(m, "Frequency requested %d\n",
2211 		   intel_gpu_freq(dev_priv, rps->cur_freq));
2212 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2213 		   intel_gpu_freq(dev_priv, rps->min_freq),
2214 		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2215 		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2216 		   intel_gpu_freq(dev_priv, rps->max_freq));
2217 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
2218 		   intel_gpu_freq(dev_priv, rps->idle_freq),
2219 		   intel_gpu_freq(dev_priv, rps->efficient_freq),
2220 		   intel_gpu_freq(dev_priv, rps->boost_freq));
2221 
2222 	mutex_lock(&dev->filelist_mutex);
2223 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2224 		struct drm_i915_file_private *file_priv = file->driver_priv;
2225 		struct task_struct *task;
2226 
2227 		rcu_read_lock();
2228 		task = pid_task(file->pid, PIDTYPE_PID);
2229 		seq_printf(m, "%s [%d]: %d boosts\n",
2230 			   task ? task->comm : "<unknown>",
2231 			   task ? task->pid : -1,
2232 			   atomic_read(&file_priv->rps_client.boosts));
2233 		rcu_read_unlock();
2234 	}
2235 	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2236 		   atomic_read(&rps->boosts));
2237 	mutex_unlock(&dev->filelist_mutex);
2238 
2239 	if (INTEL_GEN(dev_priv) >= 6 &&
2240 	    rps->enabled &&
2241 	    dev_priv->gt.active_requests) {
2242 		u32 rpup, rpupei;
2243 		u32 rpdown, rpdownei;
2244 
2245 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2246 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2247 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2248 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2249 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2250 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2251 
2252 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2253 			   rps_power_to_str(rps->power));
2254 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
2255 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
2256 			   rps->up_threshold);
2257 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
2258 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2259 			   rps->down_threshold);
2260 	} else {
2261 		seq_puts(m, "\nRPS Autotuning inactive\n");
2262 	}
2263 
2264 	return 0;
2265 }
2266 
2267 static int i915_llc(struct seq_file *m, void *data)
2268 {
2269 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2270 	const bool edram = INTEL_GEN(dev_priv) > 8;
2271 
2272 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2273 	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2274 		   intel_uncore_edram_size(dev_priv)/1024/1024);
2275 
2276 	return 0;
2277 }
2278 
2279 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2280 {
2281 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2282 	struct drm_printer p;
2283 
2284 	if (!HAS_HUC(dev_priv))
2285 		return -ENODEV;
2286 
2287 	p = drm_seq_file_printer(m);
2288 	intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2289 
2290 	intel_runtime_pm_get(dev_priv);
2291 	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2292 	intel_runtime_pm_put(dev_priv);
2293 
2294 	return 0;
2295 }
2296 
2297 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2298 {
2299 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2300 	struct drm_printer p;
2301 	u32 tmp, i;
2302 
2303 	if (!HAS_GUC(dev_priv))
2304 		return -ENODEV;
2305 
2306 	p = drm_seq_file_printer(m);
2307 	intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2308 
2309 	intel_runtime_pm_get(dev_priv);
2310 
2311 	tmp = I915_READ(GUC_STATUS);
2312 
2313 	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2314 	seq_printf(m, "\tBootrom status = 0x%x\n",
2315 		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2316 	seq_printf(m, "\tuKernel status = 0x%x\n",
2317 		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2318 	seq_printf(m, "\tMIA Core status = 0x%x\n",
2319 		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2320 	seq_puts(m, "\nScratch registers:\n");
2321 	for (i = 0; i < 16; i++)
2322 		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2323 
2324 	intel_runtime_pm_put(dev_priv);
2325 
2326 	return 0;
2327 }
2328 
2329 static void i915_guc_log_info(struct seq_file *m,
2330 			      struct drm_i915_private *dev_priv)
2331 {
2332 	struct intel_guc *guc = &dev_priv->guc;
2333 
2334 	seq_puts(m, "\nGuC logging stats:\n");
2335 
2336 	seq_printf(m, "\tISR:   flush count %10u, overflow count %10u\n",
2337 		   guc->log.flush_count[GUC_ISR_LOG_BUFFER],
2338 		   guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]);
2339 
2340 	seq_printf(m, "\tDPC:   flush count %10u, overflow count %10u\n",
2341 		   guc->log.flush_count[GUC_DPC_LOG_BUFFER],
2342 		   guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]);
2343 
2344 	seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n",
2345 		   guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER],
2346 		   guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]);
2347 
2348 	seq_printf(m, "\tTotal flush interrupt count: %u\n",
2349 		   guc->log.flush_interrupt_count);
2350 
2351 	seq_printf(m, "\tCapture miss count: %u\n",
2352 		   guc->log.capture_miss_count);
2353 }
2354 
2355 static void i915_guc_client_info(struct seq_file *m,
2356 				 struct drm_i915_private *dev_priv,
2357 				 struct intel_guc_client *client)
2358 {
2359 	struct intel_engine_cs *engine;
2360 	enum intel_engine_id id;
2361 	uint64_t tot = 0;
2362 
2363 	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2364 		client->priority, client->stage_id, client->proc_desc_offset);
2365 	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2366 		client->doorbell_id, client->doorbell_offset);
2367 
2368 	for_each_engine(engine, dev_priv, id) {
2369 		u64 submissions = client->submissions[id];
2370 		tot += submissions;
2371 		seq_printf(m, "\tSubmissions: %llu %s\n",
2372 				submissions, engine->name);
2373 	}
2374 	seq_printf(m, "\tTotal: %llu\n", tot);
2375 }
2376 
2377 static int i915_guc_info(struct seq_file *m, void *data)
2378 {
2379 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2380 	const struct intel_guc *guc = &dev_priv->guc;
2381 
2382 	if (!USES_GUC_SUBMISSION(dev_priv))
2383 		return -ENODEV;
2384 
2385 	GEM_BUG_ON(!guc->execbuf_client);
2386 
2387 	seq_printf(m, "Doorbell map:\n");
2388 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2389 	seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
2390 
2391 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2392 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2393 	if (guc->preempt_client) {
2394 		seq_printf(m, "\nGuC preempt client @ %p:\n",
2395 			   guc->preempt_client);
2396 		i915_guc_client_info(m, dev_priv, guc->preempt_client);
2397 	}
2398 
2399 	i915_guc_log_info(m, dev_priv);
2400 
2401 	/* Add more as required ... */
2402 
2403 	return 0;
2404 }
2405 
2406 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2407 {
2408 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2409 	const struct intel_guc *guc = &dev_priv->guc;
2410 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2411 	struct intel_guc_client *client = guc->execbuf_client;
2412 	unsigned int tmp;
2413 	int index;
2414 
2415 	if (!USES_GUC_SUBMISSION(dev_priv))
2416 		return -ENODEV;
2417 
2418 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2419 		struct intel_engine_cs *engine;
2420 
2421 		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2422 			continue;
2423 
2424 		seq_printf(m, "GuC stage descriptor %u:\n", index);
2425 		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2426 		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2427 		seq_printf(m, "\tPriority: %d\n", desc->priority);
2428 		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2429 		seq_printf(m, "\tEngines used: 0x%x\n",
2430 			   desc->engines_used);
2431 		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2432 			   desc->db_trigger_phy,
2433 			   desc->db_trigger_cpu,
2434 			   desc->db_trigger_uk);
2435 		seq_printf(m, "\tProcess descriptor: 0x%x\n",
2436 			   desc->process_desc);
2437 		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2438 			   desc->wq_addr, desc->wq_size);
2439 		seq_putc(m, '\n');
2440 
2441 		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2442 			u32 guc_engine_id = engine->guc_id;
2443 			struct guc_execlist_context *lrc =
2444 						&desc->lrc[guc_engine_id];
2445 
2446 			seq_printf(m, "\t%s LRC:\n", engine->name);
2447 			seq_printf(m, "\t\tContext desc: 0x%x\n",
2448 				   lrc->context_desc);
2449 			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2450 			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2451 			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2452 			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2453 			seq_putc(m, '\n');
2454 		}
2455 	}
2456 
2457 	return 0;
2458 }
2459 
2460 static int i915_guc_log_dump(struct seq_file *m, void *data)
2461 {
2462 	struct drm_info_node *node = m->private;
2463 	struct drm_i915_private *dev_priv = node_to_i915(node);
2464 	bool dump_load_err = !!node->info_ent->data;
2465 	struct drm_i915_gem_object *obj = NULL;
2466 	u32 *log;
2467 	int i = 0;
2468 
2469 	if (!HAS_GUC(dev_priv))
2470 		return -ENODEV;
2471 
2472 	if (dump_load_err)
2473 		obj = dev_priv->guc.load_err_log;
2474 	else if (dev_priv->guc.log.vma)
2475 		obj = dev_priv->guc.log.vma->obj;
2476 
2477 	if (!obj)
2478 		return 0;
2479 
2480 	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2481 	if (IS_ERR(log)) {
2482 		DRM_DEBUG("Failed to pin object\n");
2483 		seq_puts(m, "(log data unaccessible)\n");
2484 		return PTR_ERR(log);
2485 	}
2486 
2487 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2488 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2489 			   *(log + i), *(log + i + 1),
2490 			   *(log + i + 2), *(log + i + 3));
2491 
2492 	seq_putc(m, '\n');
2493 
2494 	i915_gem_object_unpin_map(obj);
2495 
2496 	return 0;
2497 }
2498 
2499 static int i915_guc_log_control_get(void *data, u64 *val)
2500 {
2501 	struct drm_i915_private *dev_priv = data;
2502 
2503 	if (!HAS_GUC(dev_priv))
2504 		return -ENODEV;
2505 
2506 	if (!dev_priv->guc.log.vma)
2507 		return -EINVAL;
2508 
2509 	*val = i915_modparams.guc_log_level;
2510 
2511 	return 0;
2512 }
2513 
2514 static int i915_guc_log_control_set(void *data, u64 val)
2515 {
2516 	struct drm_i915_private *dev_priv = data;
2517 
2518 	if (!HAS_GUC(dev_priv))
2519 		return -ENODEV;
2520 
2521 	return intel_guc_log_control(&dev_priv->guc, val);
2522 }
2523 
2524 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
2525 			i915_guc_log_control_get, i915_guc_log_control_set,
2526 			"%lld\n");
2527 
2528 static const char *psr2_live_status(u32 val)
2529 {
2530 	static const char * const live_status[] = {
2531 		"IDLE",
2532 		"CAPTURE",
2533 		"CAPTURE_FS",
2534 		"SLEEP",
2535 		"BUFON_FW",
2536 		"ML_UP",
2537 		"SU_STANDBY",
2538 		"FAST_SLEEP",
2539 		"DEEP_SLEEP",
2540 		"BUF_ON",
2541 		"TG_ON"
2542 	};
2543 
2544 	val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
2545 	if (val < ARRAY_SIZE(live_status))
2546 		return live_status[val];
2547 
2548 	return "unknown";
2549 }
2550 
2551 static int i915_edp_psr_status(struct seq_file *m, void *data)
2552 {
2553 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2554 	u32 psrperf = 0;
2555 	u32 stat[3];
2556 	enum pipe pipe;
2557 	bool enabled = false;
2558 	bool sink_support;
2559 
2560 	if (!HAS_PSR(dev_priv))
2561 		return -ENODEV;
2562 
2563 	sink_support = dev_priv->psr.sink_support;
2564 	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
2565 	if (!sink_support)
2566 		return 0;
2567 
2568 	intel_runtime_pm_get(dev_priv);
2569 
2570 	mutex_lock(&dev_priv->psr.lock);
2571 	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2572 	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2573 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2574 		   dev_priv->psr.busy_frontbuffer_bits);
2575 	seq_printf(m, "Re-enable work scheduled: %s\n",
2576 		   yesno(work_busy(&dev_priv->psr.work.work)));
2577 
2578 	if (HAS_DDI(dev_priv)) {
2579 		if (dev_priv->psr.psr2_support)
2580 			enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2581 		else
2582 			enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2583 	} else {
2584 		for_each_pipe(dev_priv, pipe) {
2585 			enum transcoder cpu_transcoder =
2586 				intel_pipe_to_cpu_transcoder(dev_priv, pipe);
2587 			enum intel_display_power_domain power_domain;
2588 
2589 			power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
2590 			if (!intel_display_power_get_if_enabled(dev_priv,
2591 								power_domain))
2592 				continue;
2593 
2594 			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2595 				VLV_EDP_PSR_CURR_STATE_MASK;
2596 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2597 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2598 				enabled = true;
2599 
2600 			intel_display_power_put(dev_priv, power_domain);
2601 		}
2602 	}
2603 
2604 	seq_printf(m, "Main link in standby mode: %s\n",
2605 		   yesno(dev_priv->psr.link_standby));
2606 
2607 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2608 
2609 	if (!HAS_DDI(dev_priv))
2610 		for_each_pipe(dev_priv, pipe) {
2611 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2612 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2613 				seq_printf(m, " pipe %c", pipe_name(pipe));
2614 		}
2615 	seq_puts(m, "\n");
2616 
2617 	/*
2618 	 * VLV/CHV PSR has no kind of performance counter
2619 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2620 	 */
2621 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2622 		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2623 			EDP_PSR_PERF_CNT_MASK;
2624 
2625 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2626 	}
2627 	if (dev_priv->psr.psr2_support) {
2628 		u32 psr2 = I915_READ(EDP_PSR2_STATUS);
2629 
2630 		seq_printf(m, "EDP_PSR2_STATUS: %x [%s]\n",
2631 			   psr2, psr2_live_status(psr2));
2632 	}
2633 	mutex_unlock(&dev_priv->psr.lock);
2634 
2635 	intel_runtime_pm_put(dev_priv);
2636 	return 0;
2637 }
2638 
2639 static int i915_sink_crc(struct seq_file *m, void *data)
2640 {
2641 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2642 	struct drm_device *dev = &dev_priv->drm;
2643 	struct intel_connector *connector;
2644 	struct drm_connector_list_iter conn_iter;
2645 	struct intel_dp *intel_dp = NULL;
2646 	struct drm_modeset_acquire_ctx ctx;
2647 	int ret;
2648 	u8 crc[6];
2649 
2650 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2651 
2652 	drm_connector_list_iter_begin(dev, &conn_iter);
2653 
2654 	for_each_intel_connector_iter(connector, &conn_iter) {
2655 		struct drm_crtc *crtc;
2656 		struct drm_connector_state *state;
2657 		struct intel_crtc_state *crtc_state;
2658 
2659 		if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
2660 			continue;
2661 
2662 retry:
2663 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
2664 		if (ret)
2665 			goto err;
2666 
2667 		state = connector->base.state;
2668 		if (!state->best_encoder)
2669 			continue;
2670 
2671 		crtc = state->crtc;
2672 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2673 		if (ret)
2674 			goto err;
2675 
2676 		crtc_state = to_intel_crtc_state(crtc->state);
2677 		if (!crtc_state->base.active)
2678 			continue;
2679 
2680 		/*
2681 		 * We need to wait for all crtc updates to complete, to make
2682 		 * sure any pending modesets and plane updates are completed.
2683 		 */
2684 		if (crtc_state->base.commit) {
2685 			ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);
2686 
2687 			if (ret)
2688 				goto err;
2689 		}
2690 
2691 		intel_dp = enc_to_intel_dp(state->best_encoder);
2692 
2693 		ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
2694 		if (ret)
2695 			goto err;
2696 
2697 		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2698 			   crc[0], crc[1], crc[2],
2699 			   crc[3], crc[4], crc[5]);
2700 		goto out;
2701 
2702 err:
2703 		if (ret == -EDEADLK) {
2704 			ret = drm_modeset_backoff(&ctx);
2705 			if (!ret)
2706 				goto retry;
2707 		}
2708 		goto out;
2709 	}
2710 	ret = -ENODEV;
2711 out:
2712 	drm_connector_list_iter_end(&conn_iter);
2713 	drm_modeset_drop_locks(&ctx);
2714 	drm_modeset_acquire_fini(&ctx);
2715 
2716 	return ret;
2717 }
2718 
2719 static int i915_energy_uJ(struct seq_file *m, void *data)
2720 {
2721 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2722 	unsigned long long power;
2723 	u32 units;
2724 
2725 	if (INTEL_GEN(dev_priv) < 6)
2726 		return -ENODEV;
2727 
2728 	intel_runtime_pm_get(dev_priv);
2729 
2730 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
2731 		intel_runtime_pm_put(dev_priv);
2732 		return -ENODEV;
2733 	}
2734 
2735 	units = (power & 0x1f00) >> 8;
2736 	power = I915_READ(MCH_SECP_NRG_STTS);
2737 	power = (1000000 * power) >> units; /* convert to uJ */
2738 
2739 	intel_runtime_pm_put(dev_priv);
2740 
2741 	seq_printf(m, "%llu", power);
2742 
2743 	return 0;
2744 }
2745 
2746 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2747 {
2748 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2749 	struct pci_dev *pdev = dev_priv->drm.pdev;
2750 
2751 	if (!HAS_RUNTIME_PM(dev_priv))
2752 		seq_puts(m, "Runtime power management not supported\n");
2753 
2754 	seq_printf(m, "GPU idle: %s (epoch %u)\n",
2755 		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2756 	seq_printf(m, "IRQs disabled: %s\n",
2757 		   yesno(!intel_irqs_enabled(dev_priv)));
2758 #ifdef CONFIG_PM
2759 	seq_printf(m, "Usage count: %d\n",
2760 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2761 #else
2762 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2763 #endif
2764 	seq_printf(m, "PCI device power state: %s [%d]\n",
2765 		   pci_power_name(pdev->current_state),
2766 		   pdev->current_state);
2767 
2768 	return 0;
2769 }
2770 
2771 static int i915_power_domain_info(struct seq_file *m, void *unused)
2772 {
2773 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2774 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2775 	int i;
2776 
2777 	mutex_lock(&power_domains->lock);
2778 
2779 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2780 	for (i = 0; i < power_domains->power_well_count; i++) {
2781 		struct i915_power_well *power_well;
2782 		enum intel_display_power_domain power_domain;
2783 
2784 		power_well = &power_domains->power_wells[i];
2785 		seq_printf(m, "%-25s %d\n", power_well->name,
2786 			   power_well->count);
2787 
2788 		for_each_power_domain(power_domain, power_well->domains)
2789 			seq_printf(m, "  %-23s %d\n",
2790 				 intel_display_power_domain_str(power_domain),
2791 				 power_domains->domain_use_count[power_domain]);
2792 	}
2793 
2794 	mutex_unlock(&power_domains->lock);
2795 
2796 	return 0;
2797 }
2798 
2799 static int i915_dmc_info(struct seq_file *m, void *unused)
2800 {
2801 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2802 	struct intel_csr *csr;
2803 
2804 	if (!HAS_CSR(dev_priv))
2805 		return -ENODEV;
2806 
2807 	csr = &dev_priv->csr;
2808 
2809 	intel_runtime_pm_get(dev_priv);
2810 
2811 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2812 	seq_printf(m, "path: %s\n", csr->fw_path);
2813 
2814 	if (!csr->dmc_payload)
2815 		goto out;
2816 
2817 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2818 		   CSR_VERSION_MINOR(csr->version));
2819 
2820 	if (IS_KABYLAKE(dev_priv) ||
2821 	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
2822 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2823 			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
2824 		seq_printf(m, "DC5 -> DC6 count: %d\n",
2825 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2826 	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
2827 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2828 			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
2829 	}
2830 
2831 out:
2832 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2833 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2834 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2835 
2836 	intel_runtime_pm_put(dev_priv);
2837 
2838 	return 0;
2839 }
2840 
2841 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2842 				 struct drm_display_mode *mode)
2843 {
2844 	int i;
2845 
2846 	for (i = 0; i < tabs; i++)
2847 		seq_putc(m, '\t');
2848 
2849 	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2850 		   mode->base.id, mode->name,
2851 		   mode->vrefresh, mode->clock,
2852 		   mode->hdisplay, mode->hsync_start,
2853 		   mode->hsync_end, mode->htotal,
2854 		   mode->vdisplay, mode->vsync_start,
2855 		   mode->vsync_end, mode->vtotal,
2856 		   mode->type, mode->flags);
2857 }
2858 
2859 static void intel_encoder_info(struct seq_file *m,
2860 			       struct intel_crtc *intel_crtc,
2861 			       struct intel_encoder *intel_encoder)
2862 {
2863 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2864 	struct drm_device *dev = &dev_priv->drm;
2865 	struct drm_crtc *crtc = &intel_crtc->base;
2866 	struct intel_connector *intel_connector;
2867 	struct drm_encoder *encoder;
2868 
2869 	encoder = &intel_encoder->base;
2870 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2871 		   encoder->base.id, encoder->name);
2872 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2873 		struct drm_connector *connector = &intel_connector->base;
2874 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2875 			   connector->base.id,
2876 			   connector->name,
2877 			   drm_get_connector_status_name(connector->status));
2878 		if (connector->status == connector_status_connected) {
2879 			struct drm_display_mode *mode = &crtc->mode;
2880 			seq_printf(m, ", mode:\n");
2881 			intel_seq_print_mode(m, 2, mode);
2882 		} else {
2883 			seq_putc(m, '\n');
2884 		}
2885 	}
2886 }
2887 
2888 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2889 {
2890 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2891 	struct drm_device *dev = &dev_priv->drm;
2892 	struct drm_crtc *crtc = &intel_crtc->base;
2893 	struct intel_encoder *intel_encoder;
2894 	struct drm_plane_state *plane_state = crtc->primary->state;
2895 	struct drm_framebuffer *fb = plane_state->fb;
2896 
2897 	if (fb)
2898 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2899 			   fb->base.id, plane_state->src_x >> 16,
2900 			   plane_state->src_y >> 16, fb->width, fb->height);
2901 	else
2902 		seq_puts(m, "\tprimary plane disabled\n");
2903 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2904 		intel_encoder_info(m, intel_crtc, intel_encoder);
2905 }
2906 
2907 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2908 {
2909 	struct drm_display_mode *mode = panel->fixed_mode;
2910 
2911 	seq_printf(m, "\tfixed mode:\n");
2912 	intel_seq_print_mode(m, 2, mode);
2913 }
2914 
2915 static void intel_dp_info(struct seq_file *m,
2916 			  struct intel_connector *intel_connector)
2917 {
2918 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2919 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2920 
2921 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2922 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2923 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2924 		intel_panel_info(m, &intel_connector->panel);
2925 
2926 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2927 				&intel_dp->aux);
2928 }
2929 
2930 static void intel_dp_mst_info(struct seq_file *m,
2931 			  struct intel_connector *intel_connector)
2932 {
2933 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2934 	struct intel_dp_mst_encoder *intel_mst =
2935 		enc_to_mst(&intel_encoder->base);
2936 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
2937 	struct intel_dp *intel_dp = &intel_dig_port->dp;
2938 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2939 					intel_connector->port);
2940 
2941 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2942 }
2943 
2944 static void intel_hdmi_info(struct seq_file *m,
2945 			    struct intel_connector *intel_connector)
2946 {
2947 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2948 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2949 
2950 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2951 }
2952 
2953 static void intel_lvds_info(struct seq_file *m,
2954 			    struct intel_connector *intel_connector)
2955 {
2956 	intel_panel_info(m, &intel_connector->panel);
2957 }
2958 
2959 static void intel_connector_info(struct seq_file *m,
2960 				 struct drm_connector *connector)
2961 {
2962 	struct intel_connector *intel_connector = to_intel_connector(connector);
2963 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2964 	struct drm_display_mode *mode;
2965 
2966 	seq_printf(m, "connector %d: type %s, status: %s\n",
2967 		   connector->base.id, connector->name,
2968 		   drm_get_connector_status_name(connector->status));
2969 	if (connector->status == connector_status_connected) {
2970 		seq_printf(m, "\tname: %s\n", connector->display_info.name);
2971 		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2972 			   connector->display_info.width_mm,
2973 			   connector->display_info.height_mm);
2974 		seq_printf(m, "\tsubpixel order: %s\n",
2975 			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2976 		seq_printf(m, "\tCEA rev: %d\n",
2977 			   connector->display_info.cea_rev);
2978 	}
2979 
2980 	if (!intel_encoder)
2981 		return;
2982 
2983 	switch (connector->connector_type) {
2984 	case DRM_MODE_CONNECTOR_DisplayPort:
2985 	case DRM_MODE_CONNECTOR_eDP:
2986 		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2987 			intel_dp_mst_info(m, intel_connector);
2988 		else
2989 			intel_dp_info(m, intel_connector);
2990 		break;
2991 	case DRM_MODE_CONNECTOR_LVDS:
2992 		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2993 			intel_lvds_info(m, intel_connector);
2994 		break;
2995 	case DRM_MODE_CONNECTOR_HDMIA:
2996 		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2997 		    intel_encoder->type == INTEL_OUTPUT_DDI)
2998 			intel_hdmi_info(m, intel_connector);
2999 		break;
3000 	default:
3001 		break;
3002 	}
3003 
3004 	seq_printf(m, "\tmodes:\n");
3005 	list_for_each_entry(mode, &connector->modes, head)
3006 		intel_seq_print_mode(m, 2, mode);
3007 }
3008 
3009 static const char *plane_type(enum drm_plane_type type)
3010 {
3011 	switch (type) {
3012 	case DRM_PLANE_TYPE_OVERLAY:
3013 		return "OVL";
3014 	case DRM_PLANE_TYPE_PRIMARY:
3015 		return "PRI";
3016 	case DRM_PLANE_TYPE_CURSOR:
3017 		return "CUR";
3018 	/*
3019 	 * Deliberately omitting default: to generate compiler warnings
3020 	 * when a new drm_plane_type gets added.
3021 	 */
3022 	}
3023 
3024 	return "unknown";
3025 }
3026 
3027 static const char *plane_rotation(unsigned int rotation)
3028 {
3029 	static char buf[48];
3030 	/*
3031 	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
3032 	 * will print them all to visualize if the values are misused
3033 	 */
3034 	snprintf(buf, sizeof(buf),
3035 		 "%s%s%s%s%s%s(0x%08x)",
3036 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
3037 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3038 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3039 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3040 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3041 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3042 		 rotation);
3043 
3044 	return buf;
3045 }
3046 
3047 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3048 {
3049 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3050 	struct drm_device *dev = &dev_priv->drm;
3051 	struct intel_plane *intel_plane;
3052 
3053 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3054 		struct drm_plane_state *state;
3055 		struct drm_plane *plane = &intel_plane->base;
3056 		struct drm_format_name_buf format_name;
3057 
3058 		if (!plane->state) {
3059 			seq_puts(m, "plane->state is NULL!\n");
3060 			continue;
3061 		}
3062 
3063 		state = plane->state;
3064 
3065 		if (state->fb) {
3066 			drm_get_format_name(state->fb->format->format,
3067 					    &format_name);
3068 		} else {
3069 			sprintf(format_name.str, "N/A");
3070 		}
3071 
3072 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3073 			   plane->base.id,
3074 			   plane_type(intel_plane->base.type),
3075 			   state->crtc_x, state->crtc_y,
3076 			   state->crtc_w, state->crtc_h,
3077 			   (state->src_x >> 16),
3078 			   ((state->src_x & 0xffff) * 15625) >> 10,
3079 			   (state->src_y >> 16),
3080 			   ((state->src_y & 0xffff) * 15625) >> 10,
3081 			   (state->src_w >> 16),
3082 			   ((state->src_w & 0xffff) * 15625) >> 10,
3083 			   (state->src_h >> 16),
3084 			   ((state->src_h & 0xffff) * 15625) >> 10,
3085 			   format_name.str,
3086 			   plane_rotation(state->rotation));
3087 	}
3088 }
3089 
3090 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3091 {
3092 	struct intel_crtc_state *pipe_config;
3093 	int num_scalers = intel_crtc->num_scalers;
3094 	int i;
3095 
3096 	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3097 
3098 	/* Not all platformas have a scaler */
3099 	if (num_scalers) {
3100 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3101 			   num_scalers,
3102 			   pipe_config->scaler_state.scaler_users,
3103 			   pipe_config->scaler_state.scaler_id);
3104 
3105 		for (i = 0; i < num_scalers; i++) {
3106 			struct intel_scaler *sc =
3107 					&pipe_config->scaler_state.scalers[i];
3108 
3109 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3110 				   i, yesno(sc->in_use), sc->mode);
3111 		}
3112 		seq_puts(m, "\n");
3113 	} else {
3114 		seq_puts(m, "\tNo scalers available on this platform\n");
3115 	}
3116 }
3117 
3118 static int i915_display_info(struct seq_file *m, void *unused)
3119 {
3120 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3121 	struct drm_device *dev = &dev_priv->drm;
3122 	struct intel_crtc *crtc;
3123 	struct drm_connector *connector;
3124 	struct drm_connector_list_iter conn_iter;
3125 
3126 	intel_runtime_pm_get(dev_priv);
3127 	seq_printf(m, "CRTC info\n");
3128 	seq_printf(m, "---------\n");
3129 	for_each_intel_crtc(dev, crtc) {
3130 		struct intel_crtc_state *pipe_config;
3131 
3132 		drm_modeset_lock(&crtc->base.mutex, NULL);
3133 		pipe_config = to_intel_crtc_state(crtc->base.state);
3134 
3135 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3136 			   crtc->base.base.id, pipe_name(crtc->pipe),
3137 			   yesno(pipe_config->base.active),
3138 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3139 			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3140 
3141 		if (pipe_config->base.active) {
3142 			struct intel_plane *cursor =
3143 				to_intel_plane(crtc->base.cursor);
3144 
3145 			intel_crtc_info(m, crtc);
3146 
3147 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3148 				   yesno(cursor->base.state->visible),
3149 				   cursor->base.state->crtc_x,
3150 				   cursor->base.state->crtc_y,
3151 				   cursor->base.state->crtc_w,
3152 				   cursor->base.state->crtc_h,
3153 				   cursor->cursor.base);
3154 			intel_scaler_info(m, crtc);
3155 			intel_plane_info(m, crtc);
3156 		}
3157 
3158 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3159 			   yesno(!crtc->cpu_fifo_underrun_disabled),
3160 			   yesno(!crtc->pch_fifo_underrun_disabled));
3161 		drm_modeset_unlock(&crtc->base.mutex);
3162 	}
3163 
3164 	seq_printf(m, "\n");
3165 	seq_printf(m, "Connector info\n");
3166 	seq_printf(m, "--------------\n");
3167 	mutex_lock(&dev->mode_config.mutex);
3168 	drm_connector_list_iter_begin(dev, &conn_iter);
3169 	drm_for_each_connector_iter(connector, &conn_iter)
3170 		intel_connector_info(m, connector);
3171 	drm_connector_list_iter_end(&conn_iter);
3172 	mutex_unlock(&dev->mode_config.mutex);
3173 
3174 	intel_runtime_pm_put(dev_priv);
3175 
3176 	return 0;
3177 }
3178 
3179 static int i915_engine_info(struct seq_file *m, void *unused)
3180 {
3181 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3182 	struct intel_engine_cs *engine;
3183 	enum intel_engine_id id;
3184 	struct drm_printer p;
3185 
3186 	intel_runtime_pm_get(dev_priv);
3187 
3188 	seq_printf(m, "GT awake? %s (epoch %u)\n",
3189 		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3190 	seq_printf(m, "Global active requests: %d\n",
3191 		   dev_priv->gt.active_requests);
3192 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
3193 		   dev_priv->info.cs_timestamp_frequency_khz);
3194 
3195 	p = drm_seq_file_printer(m);
3196 	for_each_engine(engine, dev_priv, id)
3197 		intel_engine_dump(engine, &p, "%s\n", engine->name);
3198 
3199 	intel_runtime_pm_put(dev_priv);
3200 
3201 	return 0;
3202 }
3203 
3204 static int i915_rcs_topology(struct seq_file *m, void *unused)
3205 {
3206 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3207 	struct drm_printer p = drm_seq_file_printer(m);
3208 
3209 	intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3210 
3211 	return 0;
3212 }
3213 
3214 static int i915_shrinker_info(struct seq_file *m, void *unused)
3215 {
3216 	struct drm_i915_private *i915 = node_to_i915(m->private);
3217 
3218 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3219 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3220 
3221 	return 0;
3222 }
3223 
3224 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3225 {
3226 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3227 	struct drm_device *dev = &dev_priv->drm;
3228 	int i;
3229 
3230 	drm_modeset_lock_all(dev);
3231 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3232 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3233 
3234 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
3235 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3236 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3237 		seq_printf(m, " tracked hardware state:\n");
3238 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3239 		seq_printf(m, " dpll_md: 0x%08x\n",
3240 			   pll->state.hw_state.dpll_md);
3241 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3242 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3243 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3244 	}
3245 	drm_modeset_unlock_all(dev);
3246 
3247 	return 0;
3248 }
3249 
3250 static int i915_wa_registers(struct seq_file *m, void *unused)
3251 {
3252 	int i;
3253 	int ret;
3254 	struct intel_engine_cs *engine;
3255 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3256 	struct drm_device *dev = &dev_priv->drm;
3257 	struct i915_workarounds *workarounds = &dev_priv->workarounds;
3258 	enum intel_engine_id id;
3259 
3260 	ret = mutex_lock_interruptible(&dev->struct_mutex);
3261 	if (ret)
3262 		return ret;
3263 
3264 	intel_runtime_pm_get(dev_priv);
3265 
3266 	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3267 	for_each_engine(engine, dev_priv, id)
3268 		seq_printf(m, "HW whitelist count for %s: %d\n",
3269 			   engine->name, workarounds->hw_whitelist_count[id]);
3270 	for (i = 0; i < workarounds->count; ++i) {
3271 		i915_reg_t addr;
3272 		u32 mask, value, read;
3273 		bool ok;
3274 
3275 		addr = workarounds->reg[i].addr;
3276 		mask = workarounds->reg[i].mask;
3277 		value = workarounds->reg[i].value;
3278 		read = I915_READ(addr);
3279 		ok = (value & mask) == (read & mask);
3280 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3281 			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3282 	}
3283 
3284 	intel_runtime_pm_put(dev_priv);
3285 	mutex_unlock(&dev->struct_mutex);
3286 
3287 	return 0;
3288 }
3289 
3290 static int i915_ipc_status_show(struct seq_file *m, void *data)
3291 {
3292 	struct drm_i915_private *dev_priv = m->private;
3293 
3294 	seq_printf(m, "Isochronous Priority Control: %s\n",
3295 			yesno(dev_priv->ipc_enabled));
3296 	return 0;
3297 }
3298 
3299 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3300 {
3301 	struct drm_i915_private *dev_priv = inode->i_private;
3302 
3303 	if (!HAS_IPC(dev_priv))
3304 		return -ENODEV;
3305 
3306 	return single_open(file, i915_ipc_status_show, dev_priv);
3307 }
3308 
3309 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3310 				     size_t len, loff_t *offp)
3311 {
3312 	struct seq_file *m = file->private_data;
3313 	struct drm_i915_private *dev_priv = m->private;
3314 	int ret;
3315 	bool enable;
3316 
3317 	ret = kstrtobool_from_user(ubuf, len, &enable);
3318 	if (ret < 0)
3319 		return ret;
3320 
3321 	intel_runtime_pm_get(dev_priv);
3322 	if (!dev_priv->ipc_enabled && enable)
3323 		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3324 	dev_priv->wm.distrust_bios_wm = true;
3325 	dev_priv->ipc_enabled = enable;
3326 	intel_enable_ipc(dev_priv);
3327 	intel_runtime_pm_put(dev_priv);
3328 
3329 	return len;
3330 }
3331 
3332 static const struct file_operations i915_ipc_status_fops = {
3333 	.owner = THIS_MODULE,
3334 	.open = i915_ipc_status_open,
3335 	.read = seq_read,
3336 	.llseek = seq_lseek,
3337 	.release = single_release,
3338 	.write = i915_ipc_status_write
3339 };
3340 
3341 static int i915_ddb_info(struct seq_file *m, void *unused)
3342 {
3343 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3344 	struct drm_device *dev = &dev_priv->drm;
3345 	struct skl_ddb_allocation *ddb;
3346 	struct skl_ddb_entry *entry;
3347 	enum pipe pipe;
3348 	int plane;
3349 
3350 	if (INTEL_GEN(dev_priv) < 9)
3351 		return -ENODEV;
3352 
3353 	drm_modeset_lock_all(dev);
3354 
3355 	ddb = &dev_priv->wm.skl_hw.ddb;
3356 
3357 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3358 
3359 	for_each_pipe(dev_priv, pipe) {
3360 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3361 
3362 		for_each_universal_plane(dev_priv, pipe, plane) {
3363 			entry = &ddb->plane[pipe][plane];
3364 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3365 				   entry->start, entry->end,
3366 				   skl_ddb_entry_size(entry));
3367 		}
3368 
3369 		entry = &ddb->plane[pipe][PLANE_CURSOR];
3370 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3371 			   entry->end, skl_ddb_entry_size(entry));
3372 	}
3373 
3374 	drm_modeset_unlock_all(dev);
3375 
3376 	return 0;
3377 }
3378 
3379 static void drrs_status_per_crtc(struct seq_file *m,
3380 				 struct drm_device *dev,
3381 				 struct intel_crtc *intel_crtc)
3382 {
3383 	struct drm_i915_private *dev_priv = to_i915(dev);
3384 	struct i915_drrs *drrs = &dev_priv->drrs;
3385 	int vrefresh = 0;
3386 	struct drm_connector *connector;
3387 	struct drm_connector_list_iter conn_iter;
3388 
3389 	drm_connector_list_iter_begin(dev, &conn_iter);
3390 	drm_for_each_connector_iter(connector, &conn_iter) {
3391 		if (connector->state->crtc != &intel_crtc->base)
3392 			continue;
3393 
3394 		seq_printf(m, "%s:\n", connector->name);
3395 	}
3396 	drm_connector_list_iter_end(&conn_iter);
3397 
3398 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3399 		seq_puts(m, "\tVBT: DRRS_type: Static");
3400 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3401 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3402 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3403 		seq_puts(m, "\tVBT: DRRS_type: None");
3404 	else
3405 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3406 
3407 	seq_puts(m, "\n\n");
3408 
3409 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3410 		struct intel_panel *panel;
3411 
3412 		mutex_lock(&drrs->mutex);
3413 		/* DRRS Supported */
3414 		seq_puts(m, "\tDRRS Supported: Yes\n");
3415 
3416 		/* disable_drrs() will make drrs->dp NULL */
3417 		if (!drrs->dp) {
3418 			seq_puts(m, "Idleness DRRS: Disabled\n");
3419 			if (dev_priv->psr.enabled)
3420 				seq_puts(m,
3421 				"\tAs PSR is enabled, DRRS is not enabled\n");
3422 			mutex_unlock(&drrs->mutex);
3423 			return;
3424 		}
3425 
3426 		panel = &drrs->dp->attached_connector->panel;
3427 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3428 					drrs->busy_frontbuffer_bits);
3429 
3430 		seq_puts(m, "\n\t\t");
3431 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3432 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3433 			vrefresh = panel->fixed_mode->vrefresh;
3434 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3435 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3436 			vrefresh = panel->downclock_mode->vrefresh;
3437 		} else {
3438 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3439 						drrs->refresh_rate_type);
3440 			mutex_unlock(&drrs->mutex);
3441 			return;
3442 		}
3443 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3444 
3445 		seq_puts(m, "\n\t\t");
3446 		mutex_unlock(&drrs->mutex);
3447 	} else {
3448 		/* DRRS not supported. Print the VBT parameter*/
3449 		seq_puts(m, "\tDRRS Supported : No");
3450 	}
3451 	seq_puts(m, "\n");
3452 }
3453 
3454 static int i915_drrs_status(struct seq_file *m, void *unused)
3455 {
3456 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3457 	struct drm_device *dev = &dev_priv->drm;
3458 	struct intel_crtc *intel_crtc;
3459 	int active_crtc_cnt = 0;
3460 
3461 	drm_modeset_lock_all(dev);
3462 	for_each_intel_crtc(dev, intel_crtc) {
3463 		if (intel_crtc->base.state->active) {
3464 			active_crtc_cnt++;
3465 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3466 
3467 			drrs_status_per_crtc(m, dev, intel_crtc);
3468 		}
3469 	}
3470 	drm_modeset_unlock_all(dev);
3471 
3472 	if (!active_crtc_cnt)
3473 		seq_puts(m, "No active crtc found\n");
3474 
3475 	return 0;
3476 }
3477 
3478 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3479 {
3480 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3481 	struct drm_device *dev = &dev_priv->drm;
3482 	struct intel_encoder *intel_encoder;
3483 	struct intel_digital_port *intel_dig_port;
3484 	struct drm_connector *connector;
3485 	struct drm_connector_list_iter conn_iter;
3486 
3487 	drm_connector_list_iter_begin(dev, &conn_iter);
3488 	drm_for_each_connector_iter(connector, &conn_iter) {
3489 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3490 			continue;
3491 
3492 		intel_encoder = intel_attached_encoder(connector);
3493 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3494 			continue;
3495 
3496 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3497 		if (!intel_dig_port->dp.can_mst)
3498 			continue;
3499 
3500 		seq_printf(m, "MST Source Port %c\n",
3501 			   port_name(intel_dig_port->base.port));
3502 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3503 	}
3504 	drm_connector_list_iter_end(&conn_iter);
3505 
3506 	return 0;
3507 }
3508 
3509 static ssize_t i915_displayport_test_active_write(struct file *file,
3510 						  const char __user *ubuf,
3511 						  size_t len, loff_t *offp)
3512 {
3513 	char *input_buffer;
3514 	int status = 0;
3515 	struct drm_device *dev;
3516 	struct drm_connector *connector;
3517 	struct drm_connector_list_iter conn_iter;
3518 	struct intel_dp *intel_dp;
3519 	int val = 0;
3520 
3521 	dev = ((struct seq_file *)file->private_data)->private;
3522 
3523 	if (len == 0)
3524 		return 0;
3525 
3526 	input_buffer = memdup_user_nul(ubuf, len);
3527 	if (IS_ERR(input_buffer))
3528 		return PTR_ERR(input_buffer);
3529 
3530 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3531 
3532 	drm_connector_list_iter_begin(dev, &conn_iter);
3533 	drm_for_each_connector_iter(connector, &conn_iter) {
3534 		struct intel_encoder *encoder;
3535 
3536 		if (connector->connector_type !=
3537 		    DRM_MODE_CONNECTOR_DisplayPort)
3538 			continue;
3539 
3540 		encoder = to_intel_encoder(connector->encoder);
3541 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3542 			continue;
3543 
3544 		if (encoder && connector->status == connector_status_connected) {
3545 			intel_dp = enc_to_intel_dp(&encoder->base);
3546 			status = kstrtoint(input_buffer, 10, &val);
3547 			if (status < 0)
3548 				break;
3549 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3550 			/* To prevent erroneous activation of the compliance
3551 			 * testing code, only accept an actual value of 1 here
3552 			 */
3553 			if (val == 1)
3554 				intel_dp->compliance.test_active = 1;
3555 			else
3556 				intel_dp->compliance.test_active = 0;
3557 		}
3558 	}
3559 	drm_connector_list_iter_end(&conn_iter);
3560 	kfree(input_buffer);
3561 	if (status < 0)
3562 		return status;
3563 
3564 	*offp += len;
3565 	return len;
3566 }
3567 
3568 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3569 {
3570 	struct drm_device *dev = m->private;
3571 	struct drm_connector *connector;
3572 	struct drm_connector_list_iter conn_iter;
3573 	struct intel_dp *intel_dp;
3574 
3575 	drm_connector_list_iter_begin(dev, &conn_iter);
3576 	drm_for_each_connector_iter(connector, &conn_iter) {
3577 		struct intel_encoder *encoder;
3578 
3579 		if (connector->connector_type !=
3580 		    DRM_MODE_CONNECTOR_DisplayPort)
3581 			continue;
3582 
3583 		encoder = to_intel_encoder(connector->encoder);
3584 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3585 			continue;
3586 
3587 		if (encoder && connector->status == connector_status_connected) {
3588 			intel_dp = enc_to_intel_dp(&encoder->base);
3589 			if (intel_dp->compliance.test_active)
3590 				seq_puts(m, "1");
3591 			else
3592 				seq_puts(m, "0");
3593 		} else
3594 			seq_puts(m, "0");
3595 	}
3596 	drm_connector_list_iter_end(&conn_iter);
3597 
3598 	return 0;
3599 }
3600 
3601 static int i915_displayport_test_active_open(struct inode *inode,
3602 					     struct file *file)
3603 {
3604 	struct drm_i915_private *dev_priv = inode->i_private;
3605 
3606 	return single_open(file, i915_displayport_test_active_show,
3607 			   &dev_priv->drm);
3608 }
3609 
3610 static const struct file_operations i915_displayport_test_active_fops = {
3611 	.owner = THIS_MODULE,
3612 	.open = i915_displayport_test_active_open,
3613 	.read = seq_read,
3614 	.llseek = seq_lseek,
3615 	.release = single_release,
3616 	.write = i915_displayport_test_active_write
3617 };
3618 
3619 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3620 {
3621 	struct drm_device *dev = m->private;
3622 	struct drm_connector *connector;
3623 	struct drm_connector_list_iter conn_iter;
3624 	struct intel_dp *intel_dp;
3625 
3626 	drm_connector_list_iter_begin(dev, &conn_iter);
3627 	drm_for_each_connector_iter(connector, &conn_iter) {
3628 		struct intel_encoder *encoder;
3629 
3630 		if (connector->connector_type !=
3631 		    DRM_MODE_CONNECTOR_DisplayPort)
3632 			continue;
3633 
3634 		encoder = to_intel_encoder(connector->encoder);
3635 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3636 			continue;
3637 
3638 		if (encoder && connector->status == connector_status_connected) {
3639 			intel_dp = enc_to_intel_dp(&encoder->base);
3640 			if (intel_dp->compliance.test_type ==
3641 			    DP_TEST_LINK_EDID_READ)
3642 				seq_printf(m, "%lx",
3643 					   intel_dp->compliance.test_data.edid);
3644 			else if (intel_dp->compliance.test_type ==
3645 				 DP_TEST_LINK_VIDEO_PATTERN) {
3646 				seq_printf(m, "hdisplay: %d\n",
3647 					   intel_dp->compliance.test_data.hdisplay);
3648 				seq_printf(m, "vdisplay: %d\n",
3649 					   intel_dp->compliance.test_data.vdisplay);
3650 				seq_printf(m, "bpc: %u\n",
3651 					   intel_dp->compliance.test_data.bpc);
3652 			}
3653 		} else
3654 			seq_puts(m, "0");
3655 	}
3656 	drm_connector_list_iter_end(&conn_iter);
3657 
3658 	return 0;
3659 }
3660 static int i915_displayport_test_data_open(struct inode *inode,
3661 					   struct file *file)
3662 {
3663 	struct drm_i915_private *dev_priv = inode->i_private;
3664 
3665 	return single_open(file, i915_displayport_test_data_show,
3666 			   &dev_priv->drm);
3667 }
3668 
3669 static const struct file_operations i915_displayport_test_data_fops = {
3670 	.owner = THIS_MODULE,
3671 	.open = i915_displayport_test_data_open,
3672 	.read = seq_read,
3673 	.llseek = seq_lseek,
3674 	.release = single_release
3675 };
3676 
3677 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3678 {
3679 	struct drm_device *dev = m->private;
3680 	struct drm_connector *connector;
3681 	struct drm_connector_list_iter conn_iter;
3682 	struct intel_dp *intel_dp;
3683 
3684 	drm_connector_list_iter_begin(dev, &conn_iter);
3685 	drm_for_each_connector_iter(connector, &conn_iter) {
3686 		struct intel_encoder *encoder;
3687 
3688 		if (connector->connector_type !=
3689 		    DRM_MODE_CONNECTOR_DisplayPort)
3690 			continue;
3691 
3692 		encoder = to_intel_encoder(connector->encoder);
3693 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3694 			continue;
3695 
3696 		if (encoder && connector->status == connector_status_connected) {
3697 			intel_dp = enc_to_intel_dp(&encoder->base);
3698 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3699 		} else
3700 			seq_puts(m, "0");
3701 	}
3702 	drm_connector_list_iter_end(&conn_iter);
3703 
3704 	return 0;
3705 }
3706 
3707 static int i915_displayport_test_type_open(struct inode *inode,
3708 				       struct file *file)
3709 {
3710 	struct drm_i915_private *dev_priv = inode->i_private;
3711 
3712 	return single_open(file, i915_displayport_test_type_show,
3713 			   &dev_priv->drm);
3714 }
3715 
3716 static const struct file_operations i915_displayport_test_type_fops = {
3717 	.owner = THIS_MODULE,
3718 	.open = i915_displayport_test_type_open,
3719 	.read = seq_read,
3720 	.llseek = seq_lseek,
3721 	.release = single_release
3722 };
3723 
3724 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3725 {
3726 	struct drm_i915_private *dev_priv = m->private;
3727 	struct drm_device *dev = &dev_priv->drm;
3728 	int level;
3729 	int num_levels;
3730 
3731 	if (IS_CHERRYVIEW(dev_priv))
3732 		num_levels = 3;
3733 	else if (IS_VALLEYVIEW(dev_priv))
3734 		num_levels = 1;
3735 	else if (IS_G4X(dev_priv))
3736 		num_levels = 3;
3737 	else
3738 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3739 
3740 	drm_modeset_lock_all(dev);
3741 
3742 	for (level = 0; level < num_levels; level++) {
3743 		unsigned int latency = wm[level];
3744 
3745 		/*
3746 		 * - WM1+ latency values in 0.5us units
3747 		 * - latencies are in us on gen9/vlv/chv
3748 		 */
3749 		if (INTEL_GEN(dev_priv) >= 9 ||
3750 		    IS_VALLEYVIEW(dev_priv) ||
3751 		    IS_CHERRYVIEW(dev_priv) ||
3752 		    IS_G4X(dev_priv))
3753 			latency *= 10;
3754 		else if (level > 0)
3755 			latency *= 5;
3756 
3757 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3758 			   level, wm[level], latency / 10, latency % 10);
3759 	}
3760 
3761 	drm_modeset_unlock_all(dev);
3762 }
3763 
3764 static int pri_wm_latency_show(struct seq_file *m, void *data)
3765 {
3766 	struct drm_i915_private *dev_priv = m->private;
3767 	const uint16_t *latencies;
3768 
3769 	if (INTEL_GEN(dev_priv) >= 9)
3770 		latencies = dev_priv->wm.skl_latency;
3771 	else
3772 		latencies = dev_priv->wm.pri_latency;
3773 
3774 	wm_latency_show(m, latencies);
3775 
3776 	return 0;
3777 }
3778 
3779 static int spr_wm_latency_show(struct seq_file *m, void *data)
3780 {
3781 	struct drm_i915_private *dev_priv = m->private;
3782 	const uint16_t *latencies;
3783 
3784 	if (INTEL_GEN(dev_priv) >= 9)
3785 		latencies = dev_priv->wm.skl_latency;
3786 	else
3787 		latencies = dev_priv->wm.spr_latency;
3788 
3789 	wm_latency_show(m, latencies);
3790 
3791 	return 0;
3792 }
3793 
3794 static int cur_wm_latency_show(struct seq_file *m, void *data)
3795 {
3796 	struct drm_i915_private *dev_priv = m->private;
3797 	const uint16_t *latencies;
3798 
3799 	if (INTEL_GEN(dev_priv) >= 9)
3800 		latencies = dev_priv->wm.skl_latency;
3801 	else
3802 		latencies = dev_priv->wm.cur_latency;
3803 
3804 	wm_latency_show(m, latencies);
3805 
3806 	return 0;
3807 }
3808 
3809 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3810 {
3811 	struct drm_i915_private *dev_priv = inode->i_private;
3812 
3813 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3814 		return -ENODEV;
3815 
3816 	return single_open(file, pri_wm_latency_show, dev_priv);
3817 }
3818 
3819 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3820 {
3821 	struct drm_i915_private *dev_priv = inode->i_private;
3822 
3823 	if (HAS_GMCH_DISPLAY(dev_priv))
3824 		return -ENODEV;
3825 
3826 	return single_open(file, spr_wm_latency_show, dev_priv);
3827 }
3828 
3829 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3830 {
3831 	struct drm_i915_private *dev_priv = inode->i_private;
3832 
3833 	if (HAS_GMCH_DISPLAY(dev_priv))
3834 		return -ENODEV;
3835 
3836 	return single_open(file, cur_wm_latency_show, dev_priv);
3837 }
3838 
3839 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3840 				size_t len, loff_t *offp, uint16_t wm[8])
3841 {
3842 	struct seq_file *m = file->private_data;
3843 	struct drm_i915_private *dev_priv = m->private;
3844 	struct drm_device *dev = &dev_priv->drm;
3845 	uint16_t new[8] = { 0 };
3846 	int num_levels;
3847 	int level;
3848 	int ret;
3849 	char tmp[32];
3850 
3851 	if (IS_CHERRYVIEW(dev_priv))
3852 		num_levels = 3;
3853 	else if (IS_VALLEYVIEW(dev_priv))
3854 		num_levels = 1;
3855 	else if (IS_G4X(dev_priv))
3856 		num_levels = 3;
3857 	else
3858 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3859 
3860 	if (len >= sizeof(tmp))
3861 		return -EINVAL;
3862 
3863 	if (copy_from_user(tmp, ubuf, len))
3864 		return -EFAULT;
3865 
3866 	tmp[len] = '\0';
3867 
3868 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3869 		     &new[0], &new[1], &new[2], &new[3],
3870 		     &new[4], &new[5], &new[6], &new[7]);
3871 	if (ret != num_levels)
3872 		return -EINVAL;
3873 
3874 	drm_modeset_lock_all(dev);
3875 
3876 	for (level = 0; level < num_levels; level++)
3877 		wm[level] = new[level];
3878 
3879 	drm_modeset_unlock_all(dev);
3880 
3881 	return len;
3882 }
3883 
3884 
3885 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3886 				    size_t len, loff_t *offp)
3887 {
3888 	struct seq_file *m = file->private_data;
3889 	struct drm_i915_private *dev_priv = m->private;
3890 	uint16_t *latencies;
3891 
3892 	if (INTEL_GEN(dev_priv) >= 9)
3893 		latencies = dev_priv->wm.skl_latency;
3894 	else
3895 		latencies = dev_priv->wm.pri_latency;
3896 
3897 	return wm_latency_write(file, ubuf, len, offp, latencies);
3898 }
3899 
3900 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3901 				    size_t len, loff_t *offp)
3902 {
3903 	struct seq_file *m = file->private_data;
3904 	struct drm_i915_private *dev_priv = m->private;
3905 	uint16_t *latencies;
3906 
3907 	if (INTEL_GEN(dev_priv) >= 9)
3908 		latencies = dev_priv->wm.skl_latency;
3909 	else
3910 		latencies = dev_priv->wm.spr_latency;
3911 
3912 	return wm_latency_write(file, ubuf, len, offp, latencies);
3913 }
3914 
3915 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3916 				    size_t len, loff_t *offp)
3917 {
3918 	struct seq_file *m = file->private_data;
3919 	struct drm_i915_private *dev_priv = m->private;
3920 	uint16_t *latencies;
3921 
3922 	if (INTEL_GEN(dev_priv) >= 9)
3923 		latencies = dev_priv->wm.skl_latency;
3924 	else
3925 		latencies = dev_priv->wm.cur_latency;
3926 
3927 	return wm_latency_write(file, ubuf, len, offp, latencies);
3928 }
3929 
3930 static const struct file_operations i915_pri_wm_latency_fops = {
3931 	.owner = THIS_MODULE,
3932 	.open = pri_wm_latency_open,
3933 	.read = seq_read,
3934 	.llseek = seq_lseek,
3935 	.release = single_release,
3936 	.write = pri_wm_latency_write
3937 };
3938 
3939 static const struct file_operations i915_spr_wm_latency_fops = {
3940 	.owner = THIS_MODULE,
3941 	.open = spr_wm_latency_open,
3942 	.read = seq_read,
3943 	.llseek = seq_lseek,
3944 	.release = single_release,
3945 	.write = spr_wm_latency_write
3946 };
3947 
3948 static const struct file_operations i915_cur_wm_latency_fops = {
3949 	.owner = THIS_MODULE,
3950 	.open = cur_wm_latency_open,
3951 	.read = seq_read,
3952 	.llseek = seq_lseek,
3953 	.release = single_release,
3954 	.write = cur_wm_latency_write
3955 };
3956 
3957 static int
3958 i915_wedged_get(void *data, u64 *val)
3959 {
3960 	struct drm_i915_private *dev_priv = data;
3961 
3962 	*val = i915_terminally_wedged(&dev_priv->gpu_error);
3963 
3964 	return 0;
3965 }
3966 
3967 static int
3968 i915_wedged_set(void *data, u64 val)
3969 {
3970 	struct drm_i915_private *i915 = data;
3971 	struct intel_engine_cs *engine;
3972 	unsigned int tmp;
3973 
3974 	/*
3975 	 * There is no safeguard against this debugfs entry colliding
3976 	 * with the hangcheck calling same i915_handle_error() in
3977 	 * parallel, causing an explosion. For now we assume that the
3978 	 * test harness is responsible enough not to inject gpu hangs
3979 	 * while it is writing to 'i915_wedged'
3980 	 */
3981 
3982 	if (i915_reset_backoff(&i915->gpu_error))
3983 		return -EAGAIN;
3984 
3985 	for_each_engine_masked(engine, i915, val, tmp) {
3986 		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
3987 		engine->hangcheck.stalled = true;
3988 	}
3989 
3990 	i915_handle_error(i915, val, "Manually set wedged engine mask = %llx",
3991 			  val);
3992 
3993 	wait_on_bit(&i915->gpu_error.flags,
3994 		    I915_RESET_HANDOFF,
3995 		    TASK_UNINTERRUPTIBLE);
3996 
3997 	return 0;
3998 }
3999 
4000 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4001 			i915_wedged_get, i915_wedged_set,
4002 			"%llu\n");
4003 
4004 static int
4005 fault_irq_set(struct drm_i915_private *i915,
4006 	      unsigned long *irq,
4007 	      unsigned long val)
4008 {
4009 	int err;
4010 
4011 	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4012 	if (err)
4013 		return err;
4014 
4015 	err = i915_gem_wait_for_idle(i915,
4016 				     I915_WAIT_LOCKED |
4017 				     I915_WAIT_INTERRUPTIBLE);
4018 	if (err)
4019 		goto err_unlock;
4020 
4021 	*irq = val;
4022 	mutex_unlock(&i915->drm.struct_mutex);
4023 
4024 	/* Flush idle worker to disarm irq */
4025 	drain_delayed_work(&i915->gt.idle_work);
4026 
4027 	return 0;
4028 
4029 err_unlock:
4030 	mutex_unlock(&i915->drm.struct_mutex);
4031 	return err;
4032 }
4033 
4034 static int
4035 i915_ring_missed_irq_get(void *data, u64 *val)
4036 {
4037 	struct drm_i915_private *dev_priv = data;
4038 
4039 	*val = dev_priv->gpu_error.missed_irq_rings;
4040 	return 0;
4041 }
4042 
4043 static int
4044 i915_ring_missed_irq_set(void *data, u64 val)
4045 {
4046 	struct drm_i915_private *i915 = data;
4047 
4048 	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
4049 }
4050 
4051 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4052 			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4053 			"0x%08llx\n");
4054 
4055 static int
4056 i915_ring_test_irq_get(void *data, u64 *val)
4057 {
4058 	struct drm_i915_private *dev_priv = data;
4059 
4060 	*val = dev_priv->gpu_error.test_irq_rings;
4061 
4062 	return 0;
4063 }
4064 
4065 static int
4066 i915_ring_test_irq_set(void *data, u64 val)
4067 {
4068 	struct drm_i915_private *i915 = data;
4069 
4070 	val &= INTEL_INFO(i915)->ring_mask;
4071 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4072 
4073 	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
4074 }
4075 
4076 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4077 			i915_ring_test_irq_get, i915_ring_test_irq_set,
4078 			"0x%08llx\n");
4079 
4080 #define DROP_UNBOUND	BIT(0)
4081 #define DROP_BOUND	BIT(1)
4082 #define DROP_RETIRE	BIT(2)
4083 #define DROP_ACTIVE	BIT(3)
4084 #define DROP_FREED	BIT(4)
4085 #define DROP_SHRINK_ALL	BIT(5)
4086 #define DROP_IDLE	BIT(6)
4087 #define DROP_ALL (DROP_UNBOUND	| \
4088 		  DROP_BOUND	| \
4089 		  DROP_RETIRE	| \
4090 		  DROP_ACTIVE	| \
4091 		  DROP_FREED	| \
4092 		  DROP_SHRINK_ALL |\
4093 		  DROP_IDLE)
4094 static int
4095 i915_drop_caches_get(void *data, u64 *val)
4096 {
4097 	*val = DROP_ALL;
4098 
4099 	return 0;
4100 }
4101 
4102 static int
4103 i915_drop_caches_set(void *data, u64 val)
4104 {
4105 	struct drm_i915_private *dev_priv = data;
4106 	struct drm_device *dev = &dev_priv->drm;
4107 	int ret = 0;
4108 
4109 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4110 		  val, val & DROP_ALL);
4111 
4112 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
4113 	 * on ioctls on -EAGAIN. */
4114 	if (val & (DROP_ACTIVE | DROP_RETIRE)) {
4115 		ret = mutex_lock_interruptible(&dev->struct_mutex);
4116 		if (ret)
4117 			return ret;
4118 
4119 		if (val & DROP_ACTIVE)
4120 			ret = i915_gem_wait_for_idle(dev_priv,
4121 						     I915_WAIT_INTERRUPTIBLE |
4122 						     I915_WAIT_LOCKED);
4123 
4124 		if (val & DROP_RETIRE)
4125 			i915_retire_requests(dev_priv);
4126 
4127 		mutex_unlock(&dev->struct_mutex);
4128 	}
4129 
4130 	fs_reclaim_acquire(GFP_KERNEL);
4131 	if (val & DROP_BOUND)
4132 		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND);
4133 
4134 	if (val & DROP_UNBOUND)
4135 		i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
4136 
4137 	if (val & DROP_SHRINK_ALL)
4138 		i915_gem_shrink_all(dev_priv);
4139 	fs_reclaim_release(GFP_KERNEL);
4140 
4141 	if (val & DROP_IDLE)
4142 		drain_delayed_work(&dev_priv->gt.idle_work);
4143 
4144 	if (val & DROP_FREED)
4145 		i915_gem_drain_freed_objects(dev_priv);
4146 
4147 	return ret;
4148 }
4149 
4150 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4151 			i915_drop_caches_get, i915_drop_caches_set,
4152 			"0x%08llx\n");
4153 
4154 static int
4155 i915_max_freq_get(void *data, u64 *val)
4156 {
4157 	struct drm_i915_private *dev_priv = data;
4158 
4159 	if (INTEL_GEN(dev_priv) < 6)
4160 		return -ENODEV;
4161 
4162 	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit);
4163 	return 0;
4164 }
4165 
4166 static int
4167 i915_max_freq_set(void *data, u64 val)
4168 {
4169 	struct drm_i915_private *dev_priv = data;
4170 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4171 	u32 hw_max, hw_min;
4172 	int ret;
4173 
4174 	if (INTEL_GEN(dev_priv) < 6)
4175 		return -ENODEV;
4176 
4177 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
4178 
4179 	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
4180 	if (ret)
4181 		return ret;
4182 
4183 	/*
4184 	 * Turbo will still be enabled, but won't go above the set value.
4185 	 */
4186 	val = intel_freq_opcode(dev_priv, val);
4187 
4188 	hw_max = rps->max_freq;
4189 	hw_min = rps->min_freq;
4190 
4191 	if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) {
4192 		mutex_unlock(&dev_priv->pcu_lock);
4193 		return -EINVAL;
4194 	}
4195 
4196 	rps->max_freq_softlimit = val;
4197 
4198 	if (intel_set_rps(dev_priv, val))
4199 		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
4200 
4201 	mutex_unlock(&dev_priv->pcu_lock);
4202 
4203 	return 0;
4204 }
4205 
4206 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
4207 			i915_max_freq_get, i915_max_freq_set,
4208 			"%llu\n");
4209 
4210 static int
4211 i915_min_freq_get(void *data, u64 *val)
4212 {
4213 	struct drm_i915_private *dev_priv = data;
4214 
4215 	if (INTEL_GEN(dev_priv) < 6)
4216 		return -ENODEV;
4217 
4218 	*val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit);
4219 	return 0;
4220 }
4221 
4222 static int
4223 i915_min_freq_set(void *data, u64 val)
4224 {
4225 	struct drm_i915_private *dev_priv = data;
4226 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
4227 	u32 hw_max, hw_min;
4228 	int ret;
4229 
4230 	if (INTEL_GEN(dev_priv) < 6)
4231 		return -ENODEV;
4232 
4233 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
4234 
4235 	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
4236 	if (ret)
4237 		return ret;
4238 
4239 	/*
4240 	 * Turbo will still be enabled, but won't go below the set value.
4241 	 */
4242 	val = intel_freq_opcode(dev_priv, val);
4243 
4244 	hw_max = rps->max_freq;
4245 	hw_min = rps->min_freq;
4246 
4247 	if (val < hw_min ||
4248 	    val > hw_max || val > rps->max_freq_softlimit) {
4249 		mutex_unlock(&dev_priv->pcu_lock);
4250 		return -EINVAL;
4251 	}
4252 
4253 	rps->min_freq_softlimit = val;
4254 
4255 	if (intel_set_rps(dev_priv, val))
4256 		DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
4257 
4258 	mutex_unlock(&dev_priv->pcu_lock);
4259 
4260 	return 0;
4261 }
4262 
4263 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
4264 			i915_min_freq_get, i915_min_freq_set,
4265 			"%llu\n");
4266 
4267 static int
4268 i915_cache_sharing_get(void *data, u64 *val)
4269 {
4270 	struct drm_i915_private *dev_priv = data;
4271 	u32 snpcr;
4272 
4273 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4274 		return -ENODEV;
4275 
4276 	intel_runtime_pm_get(dev_priv);
4277 
4278 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4279 
4280 	intel_runtime_pm_put(dev_priv);
4281 
4282 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4283 
4284 	return 0;
4285 }
4286 
4287 static int
4288 i915_cache_sharing_set(void *data, u64 val)
4289 {
4290 	struct drm_i915_private *dev_priv = data;
4291 	u32 snpcr;
4292 
4293 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4294 		return -ENODEV;
4295 
4296 	if (val > 3)
4297 		return -EINVAL;
4298 
4299 	intel_runtime_pm_get(dev_priv);
4300 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4301 
4302 	/* Update the cache sharing policy here as well */
4303 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4304 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
4305 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4306 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4307 
4308 	intel_runtime_pm_put(dev_priv);
4309 	return 0;
4310 }
4311 
4312 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4313 			i915_cache_sharing_get, i915_cache_sharing_set,
4314 			"%llu\n");
4315 
4316 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4317 					  struct sseu_dev_info *sseu)
4318 {
4319 	int ss_max = 2;
4320 	int ss;
4321 	u32 sig1[ss_max], sig2[ss_max];
4322 
4323 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4324 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4325 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4326 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4327 
4328 	for (ss = 0; ss < ss_max; ss++) {
4329 		unsigned int eu_cnt;
4330 
4331 		if (sig1[ss] & CHV_SS_PG_ENABLE)
4332 			/* skip disabled subslice */
4333 			continue;
4334 
4335 		sseu->slice_mask = BIT(0);
4336 		sseu->subslice_mask[0] |= BIT(ss);
4337 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4338 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4339 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4340 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4341 		sseu->eu_total += eu_cnt;
4342 		sseu->eu_per_subslice = max_t(unsigned int,
4343 					      sseu->eu_per_subslice, eu_cnt);
4344 	}
4345 }
4346 
4347 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4348 				     struct sseu_dev_info *sseu)
4349 {
4350 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4351 	int s, ss;
4352 	u32 s_reg[info->sseu.max_slices];
4353 	u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
4354 
4355 	for (s = 0; s < info->sseu.max_slices; s++) {
4356 		/*
4357 		 * FIXME: Valid SS Mask respects the spec and read
4358 		 * only valid bits for those registers, excluding reserverd
4359 		 * although this seems wrong because it would leave many
4360 		 * subslices without ACK.
4361 		 */
4362 		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4363 			GEN10_PGCTL_VALID_SS_MASK(s);
4364 		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4365 		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4366 	}
4367 
4368 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4369 		     GEN9_PGCTL_SSA_EU19_ACK |
4370 		     GEN9_PGCTL_SSA_EU210_ACK |
4371 		     GEN9_PGCTL_SSA_EU311_ACK;
4372 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4373 		     GEN9_PGCTL_SSB_EU19_ACK |
4374 		     GEN9_PGCTL_SSB_EU210_ACK |
4375 		     GEN9_PGCTL_SSB_EU311_ACK;
4376 
4377 	for (s = 0; s < info->sseu.max_slices; s++) {
4378 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4379 			/* skip disabled slice */
4380 			continue;
4381 
4382 		sseu->slice_mask |= BIT(s);
4383 		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
4384 
4385 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4386 			unsigned int eu_cnt;
4387 
4388 			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4389 				/* skip disabled subslice */
4390 				continue;
4391 
4392 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4393 					       eu_mask[ss % 2]);
4394 			sseu->eu_total += eu_cnt;
4395 			sseu->eu_per_subslice = max_t(unsigned int,
4396 						      sseu->eu_per_subslice,
4397 						      eu_cnt);
4398 		}
4399 	}
4400 }
4401 
4402 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4403 				    struct sseu_dev_info *sseu)
4404 {
4405 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4406 	int s, ss;
4407 	u32 s_reg[info->sseu.max_slices];
4408 	u32 eu_reg[2 * info->sseu.max_subslices], eu_mask[2];
4409 
4410 	for (s = 0; s < info->sseu.max_slices; s++) {
4411 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4412 		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4413 		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4414 	}
4415 
4416 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4417 		     GEN9_PGCTL_SSA_EU19_ACK |
4418 		     GEN9_PGCTL_SSA_EU210_ACK |
4419 		     GEN9_PGCTL_SSA_EU311_ACK;
4420 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4421 		     GEN9_PGCTL_SSB_EU19_ACK |
4422 		     GEN9_PGCTL_SSB_EU210_ACK |
4423 		     GEN9_PGCTL_SSB_EU311_ACK;
4424 
4425 	for (s = 0; s < info->sseu.max_slices; s++) {
4426 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4427 			/* skip disabled slice */
4428 			continue;
4429 
4430 		sseu->slice_mask |= BIT(s);
4431 
4432 		if (IS_GEN9_BC(dev_priv))
4433 			sseu->subslice_mask[s] =
4434 				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4435 
4436 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4437 			unsigned int eu_cnt;
4438 
4439 			if (IS_GEN9_LP(dev_priv)) {
4440 				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4441 					/* skip disabled subslice */
4442 					continue;
4443 
4444 				sseu->subslice_mask[s] |= BIT(ss);
4445 			}
4446 
4447 			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4448 					       eu_mask[ss%2]);
4449 			sseu->eu_total += eu_cnt;
4450 			sseu->eu_per_subslice = max_t(unsigned int,
4451 						      sseu->eu_per_subslice,
4452 						      eu_cnt);
4453 		}
4454 	}
4455 }
4456 
4457 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4458 					 struct sseu_dev_info *sseu)
4459 {
4460 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4461 	int s;
4462 
4463 	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4464 
4465 	if (sseu->slice_mask) {
4466 		sseu->eu_per_subslice =
4467 				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
4468 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4469 			sseu->subslice_mask[s] =
4470 				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4471 		}
4472 		sseu->eu_total = sseu->eu_per_subslice *
4473 				 sseu_subslice_total(sseu);
4474 
4475 		/* subtract fused off EU(s) from enabled slice(s) */
4476 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4477 			u8 subslice_7eu =
4478 				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
4479 
4480 			sseu->eu_total -= hweight8(subslice_7eu);
4481 		}
4482 	}
4483 }
4484 
4485 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4486 				 const struct sseu_dev_info *sseu)
4487 {
4488 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4489 	const char *type = is_available_info ? "Available" : "Enabled";
4490 	int s;
4491 
4492 	seq_printf(m, "  %s Slice Mask: %04x\n", type,
4493 		   sseu->slice_mask);
4494 	seq_printf(m, "  %s Slice Total: %u\n", type,
4495 		   hweight8(sseu->slice_mask));
4496 	seq_printf(m, "  %s Subslice Total: %u\n", type,
4497 		   sseu_subslice_total(sseu));
4498 	for (s = 0; s < fls(sseu->slice_mask); s++) {
4499 		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4500 			   s, hweight8(sseu->subslice_mask[s]));
4501 	}
4502 	seq_printf(m, "  %s EU Total: %u\n", type,
4503 		   sseu->eu_total);
4504 	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4505 		   sseu->eu_per_subslice);
4506 
4507 	if (!is_available_info)
4508 		return;
4509 
4510 	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4511 	if (HAS_POOLED_EU(dev_priv))
4512 		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4513 
4514 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4515 		   yesno(sseu->has_slice_pg));
4516 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4517 		   yesno(sseu->has_subslice_pg));
4518 	seq_printf(m, "  Has EU Power Gating: %s\n",
4519 		   yesno(sseu->has_eu_pg));
4520 }
4521 
4522 static int i915_sseu_status(struct seq_file *m, void *unused)
4523 {
4524 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4525 	struct sseu_dev_info sseu;
4526 
4527 	if (INTEL_GEN(dev_priv) < 8)
4528 		return -ENODEV;
4529 
4530 	seq_puts(m, "SSEU Device Info\n");
4531 	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
4532 
4533 	seq_puts(m, "SSEU Device Status\n");
4534 	memset(&sseu, 0, sizeof(sseu));
4535 	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4536 	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4537 	sseu.max_eus_per_subslice =
4538 		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
4539 
4540 	intel_runtime_pm_get(dev_priv);
4541 
4542 	if (IS_CHERRYVIEW(dev_priv)) {
4543 		cherryview_sseu_device_status(dev_priv, &sseu);
4544 	} else if (IS_BROADWELL(dev_priv)) {
4545 		broadwell_sseu_device_status(dev_priv, &sseu);
4546 	} else if (IS_GEN9(dev_priv)) {
4547 		gen9_sseu_device_status(dev_priv, &sseu);
4548 	} else if (INTEL_GEN(dev_priv) >= 10) {
4549 		gen10_sseu_device_status(dev_priv, &sseu);
4550 	}
4551 
4552 	intel_runtime_pm_put(dev_priv);
4553 
4554 	i915_print_sseu_info(m, false, &sseu);
4555 
4556 	return 0;
4557 }
4558 
4559 static int i915_forcewake_open(struct inode *inode, struct file *file)
4560 {
4561 	struct drm_i915_private *i915 = inode->i_private;
4562 
4563 	if (INTEL_GEN(i915) < 6)
4564 		return 0;
4565 
4566 	intel_runtime_pm_get(i915);
4567 	intel_uncore_forcewake_user_get(i915);
4568 
4569 	return 0;
4570 }
4571 
4572 static int i915_forcewake_release(struct inode *inode, struct file *file)
4573 {
4574 	struct drm_i915_private *i915 = inode->i_private;
4575 
4576 	if (INTEL_GEN(i915) < 6)
4577 		return 0;
4578 
4579 	intel_uncore_forcewake_user_put(i915);
4580 	intel_runtime_pm_put(i915);
4581 
4582 	return 0;
4583 }
4584 
4585 static const struct file_operations i915_forcewake_fops = {
4586 	.owner = THIS_MODULE,
4587 	.open = i915_forcewake_open,
4588 	.release = i915_forcewake_release,
4589 };
4590 
4591 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4592 {
4593 	struct drm_i915_private *dev_priv = m->private;
4594 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4595 
4596 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4597 	seq_printf(m, "Detected: %s\n",
4598 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4599 
4600 	return 0;
4601 }
4602 
4603 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4604 					const char __user *ubuf, size_t len,
4605 					loff_t *offp)
4606 {
4607 	struct seq_file *m = file->private_data;
4608 	struct drm_i915_private *dev_priv = m->private;
4609 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4610 	unsigned int new_threshold;
4611 	int i;
4612 	char *newline;
4613 	char tmp[16];
4614 
4615 	if (len >= sizeof(tmp))
4616 		return -EINVAL;
4617 
4618 	if (copy_from_user(tmp, ubuf, len))
4619 		return -EFAULT;
4620 
4621 	tmp[len] = '\0';
4622 
4623 	/* Strip newline, if any */
4624 	newline = strchr(tmp, '\n');
4625 	if (newline)
4626 		*newline = '\0';
4627 
4628 	if (strcmp(tmp, "reset") == 0)
4629 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4630 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4631 		return -EINVAL;
4632 
4633 	if (new_threshold > 0)
4634 		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4635 			      new_threshold);
4636 	else
4637 		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4638 
4639 	spin_lock_irq(&dev_priv->irq_lock);
4640 	hotplug->hpd_storm_threshold = new_threshold;
4641 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4642 	for_each_hpd_pin(i)
4643 		hotplug->stats[i].count = 0;
4644 	spin_unlock_irq(&dev_priv->irq_lock);
4645 
4646 	/* Re-enable hpd immediately if we were in an irq storm */
4647 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4648 
4649 	return len;
4650 }
4651 
4652 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4653 {
4654 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4655 }
4656 
4657 static const struct file_operations i915_hpd_storm_ctl_fops = {
4658 	.owner = THIS_MODULE,
4659 	.open = i915_hpd_storm_ctl_open,
4660 	.read = seq_read,
4661 	.llseek = seq_lseek,
4662 	.release = single_release,
4663 	.write = i915_hpd_storm_ctl_write
4664 };
4665 
4666 static int i915_drrs_ctl_set(void *data, u64 val)
4667 {
4668 	struct drm_i915_private *dev_priv = data;
4669 	struct drm_device *dev = &dev_priv->drm;
4670 	struct intel_crtc *intel_crtc;
4671 	struct intel_encoder *encoder;
4672 	struct intel_dp *intel_dp;
4673 
4674 	if (INTEL_GEN(dev_priv) < 7)
4675 		return -ENODEV;
4676 
4677 	drm_modeset_lock_all(dev);
4678 	for_each_intel_crtc(dev, intel_crtc) {
4679 		if (!intel_crtc->base.state->active ||
4680 					!intel_crtc->config->has_drrs)
4681 			continue;
4682 
4683 		for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4684 			if (encoder->type != INTEL_OUTPUT_EDP)
4685 				continue;
4686 
4687 			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4688 						val ? "en" : "dis", val);
4689 
4690 			intel_dp = enc_to_intel_dp(&encoder->base);
4691 			if (val)
4692 				intel_edp_drrs_enable(intel_dp,
4693 							intel_crtc->config);
4694 			else
4695 				intel_edp_drrs_disable(intel_dp,
4696 							intel_crtc->config);
4697 		}
4698 	}
4699 	drm_modeset_unlock_all(dev);
4700 
4701 	return 0;
4702 }
4703 
4704 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4705 
4706 static const struct drm_info_list i915_debugfs_list[] = {
4707 	{"i915_capabilities", i915_capabilities, 0},
4708 	{"i915_gem_objects", i915_gem_object_info, 0},
4709 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
4710 	{"i915_gem_stolen", i915_gem_stolen_list_info },
4711 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4712 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4713 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4714 	{"i915_guc_info", i915_guc_info, 0},
4715 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4716 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4717 	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4718 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4719 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4720 	{"i915_frequency_info", i915_frequency_info, 0},
4721 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4722 	{"i915_reset_info", i915_reset_info, 0},
4723 	{"i915_drpc_info", i915_drpc_info, 0},
4724 	{"i915_emon_status", i915_emon_status, 0},
4725 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4726 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4727 	{"i915_fbc_status", i915_fbc_status, 0},
4728 	{"i915_ips_status", i915_ips_status, 0},
4729 	{"i915_sr_status", i915_sr_status, 0},
4730 	{"i915_opregion", i915_opregion, 0},
4731 	{"i915_vbt", i915_vbt, 0},
4732 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4733 	{"i915_context_status", i915_context_status, 0},
4734 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4735 	{"i915_swizzle_info", i915_swizzle_info, 0},
4736 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
4737 	{"i915_llc", i915_llc, 0},
4738 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4739 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
4740 	{"i915_energy_uJ", i915_energy_uJ, 0},
4741 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4742 	{"i915_power_domain_info", i915_power_domain_info, 0},
4743 	{"i915_dmc_info", i915_dmc_info, 0},
4744 	{"i915_display_info", i915_display_info, 0},
4745 	{"i915_engine_info", i915_engine_info, 0},
4746 	{"i915_rcs_topology", i915_rcs_topology, 0},
4747 	{"i915_shrinker_info", i915_shrinker_info, 0},
4748 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4749 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4750 	{"i915_wa_registers", i915_wa_registers, 0},
4751 	{"i915_ddb_info", i915_ddb_info, 0},
4752 	{"i915_sseu_status", i915_sseu_status, 0},
4753 	{"i915_drrs_status", i915_drrs_status, 0},
4754 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4755 };
4756 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4757 
4758 static const struct i915_debugfs_files {
4759 	const char *name;
4760 	const struct file_operations *fops;
4761 } i915_debugfs_files[] = {
4762 	{"i915_wedged", &i915_wedged_fops},
4763 	{"i915_max_freq", &i915_max_freq_fops},
4764 	{"i915_min_freq", &i915_min_freq_fops},
4765 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4766 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4767 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
4768 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4769 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4770 	{"i915_error_state", &i915_error_state_fops},
4771 	{"i915_gpu_info", &i915_gpu_info_fops},
4772 #endif
4773 	{"i915_next_seqno", &i915_next_seqno_fops},
4774 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
4775 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4776 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4777 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4778 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4779 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4780 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4781 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4782 	{"i915_guc_log_control", &i915_guc_log_control_fops},
4783 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4784 	{"i915_ipc_status", &i915_ipc_status_fops},
4785 	{"i915_drrs_ctl", &i915_drrs_ctl_fops}
4786 };
4787 
4788 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4789 {
4790 	struct drm_minor *minor = dev_priv->drm.primary;
4791 	struct dentry *ent;
4792 	int ret, i;
4793 
4794 	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4795 				  minor->debugfs_root, to_i915(minor->dev),
4796 				  &i915_forcewake_fops);
4797 	if (!ent)
4798 		return -ENOMEM;
4799 
4800 	ret = intel_pipe_crc_create(minor);
4801 	if (ret)
4802 		return ret;
4803 
4804 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4805 		ent = debugfs_create_file(i915_debugfs_files[i].name,
4806 					  S_IRUGO | S_IWUSR,
4807 					  minor->debugfs_root,
4808 					  to_i915(minor->dev),
4809 					  i915_debugfs_files[i].fops);
4810 		if (!ent)
4811 			return -ENOMEM;
4812 	}
4813 
4814 	return drm_debugfs_create_files(i915_debugfs_list,
4815 					I915_DEBUGFS_ENTRIES,
4816 					minor->debugfs_root, minor);
4817 }
4818 
4819 struct dpcd_block {
4820 	/* DPCD dump start address. */
4821 	unsigned int offset;
4822 	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4823 	unsigned int end;
4824 	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4825 	size_t size;
4826 	/* Only valid for eDP. */
4827 	bool edp;
4828 };
4829 
4830 static const struct dpcd_block i915_dpcd_debug[] = {
4831 	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4832 	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4833 	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4834 	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4835 	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4836 	{ .offset = DP_SET_POWER },
4837 	{ .offset = DP_EDP_DPCD_REV },
4838 	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4839 	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4840 	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4841 };
4842 
4843 static int i915_dpcd_show(struct seq_file *m, void *data)
4844 {
4845 	struct drm_connector *connector = m->private;
4846 	struct intel_dp *intel_dp =
4847 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4848 	uint8_t buf[16];
4849 	ssize_t err;
4850 	int i;
4851 
4852 	if (connector->status != connector_status_connected)
4853 		return -ENODEV;
4854 
4855 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4856 		const struct dpcd_block *b = &i915_dpcd_debug[i];
4857 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4858 
4859 		if (b->edp &&
4860 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4861 			continue;
4862 
4863 		/* low tech for now */
4864 		if (WARN_ON(size > sizeof(buf)))
4865 			continue;
4866 
4867 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4868 		if (err <= 0) {
4869 			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4870 				  size, b->offset, err);
4871 			continue;
4872 		}
4873 
4874 		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
4875 	}
4876 
4877 	return 0;
4878 }
4879 
4880 static int i915_dpcd_open(struct inode *inode, struct file *file)
4881 {
4882 	return single_open(file, i915_dpcd_show, inode->i_private);
4883 }
4884 
4885 static const struct file_operations i915_dpcd_fops = {
4886 	.owner = THIS_MODULE,
4887 	.open = i915_dpcd_open,
4888 	.read = seq_read,
4889 	.llseek = seq_lseek,
4890 	.release = single_release,
4891 };
4892 
4893 static int i915_panel_show(struct seq_file *m, void *data)
4894 {
4895 	struct drm_connector *connector = m->private;
4896 	struct intel_dp *intel_dp =
4897 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4898 
4899 	if (connector->status != connector_status_connected)
4900 		return -ENODEV;
4901 
4902 	seq_printf(m, "Panel power up delay: %d\n",
4903 		   intel_dp->panel_power_up_delay);
4904 	seq_printf(m, "Panel power down delay: %d\n",
4905 		   intel_dp->panel_power_down_delay);
4906 	seq_printf(m, "Backlight on delay: %d\n",
4907 		   intel_dp->backlight_on_delay);
4908 	seq_printf(m, "Backlight off delay: %d\n",
4909 		   intel_dp->backlight_off_delay);
4910 
4911 	return 0;
4912 }
4913 
4914 static int i915_panel_open(struct inode *inode, struct file *file)
4915 {
4916 	return single_open(file, i915_panel_show, inode->i_private);
4917 }
4918 
4919 static const struct file_operations i915_panel_fops = {
4920 	.owner = THIS_MODULE,
4921 	.open = i915_panel_open,
4922 	.read = seq_read,
4923 	.llseek = seq_lseek,
4924 	.release = single_release,
4925 };
4926 
4927 /**
4928  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4929  * @connector: pointer to a registered drm_connector
4930  *
4931  * Cleanup will be done by drm_connector_unregister() through a call to
4932  * drm_debugfs_connector_remove().
4933  *
4934  * Returns 0 on success, negative error codes on error.
4935  */
4936 int i915_debugfs_connector_add(struct drm_connector *connector)
4937 {
4938 	struct dentry *root = connector->debugfs_entry;
4939 
4940 	/* The connector must have been registered beforehands. */
4941 	if (!root)
4942 		return -ENODEV;
4943 
4944 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4945 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4946 		debugfs_create_file("i915_dpcd", S_IRUGO, root,
4947 				    connector, &i915_dpcd_fops);
4948 
4949 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4950 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4951 				    connector, &i915_panel_fops);
4952 
4953 	return 0;
4954 }
4955