1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28 
29 #include <linux/seq_file.h>
30 #include <linux/circ_buf.h>
31 #include <linux/ctype.h>
32 #include <linux/debugfs.h>
33 #include <linux/slab.h>
34 #include <linux/export.h>
35 #include <linux/list_sort.h>
36 #include <asm/msr-index.h>
37 #include <drm/drmP.h>
38 #include "intel_drv.h"
39 #include "intel_ringbuffer.h"
40 #include <drm/i915_drm.h>
41 #include "i915_drv.h"
42 
43 enum {
44 	ACTIVE_LIST,
45 	INACTIVE_LIST,
46 	PINNED_LIST,
47 };
48 
/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated, we need to hook into the minor for release. */
51 static int
52 drm_add_fake_info_node(struct drm_minor *minor,
53 		       struct dentry *ent,
54 		       const void *key)
55 {
56 	struct drm_info_node *node;
57 
58 	node = kmalloc(sizeof(*node), GFP_KERNEL);
59 	if (node == NULL) {
60 		debugfs_remove(ent);
61 		return -ENOMEM;
62 	}
63 
64 	node->minor = minor;
65 	node->dent = ent;
66 	node->info_ent = (void *) key;
67 
68 	mutex_lock(&minor->debugfs_lock);
69 	list_add(&node->list, &minor->debugfs_list);
70 	mutex_unlock(&minor->debugfs_lock);
71 
72 	return 0;
73 }
74 
75 static int i915_capabilities(struct seq_file *m, void *data)
76 {
77 	struct drm_info_node *node = m->private;
78 	struct drm_device *dev = node->minor->dev;
79 	const struct intel_device_info *info = INTEL_INFO(dev);
80 
81 	seq_printf(m, "gen: %d\n", info->gen);
82 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
83 #define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
84 #define SEP_SEMICOLON ;
85 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
86 #undef PRINT_FLAG
87 #undef SEP_SEMICOLON
88 
89 	return 0;
90 }
91 
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	return obj->active ? '*' : ' ';
}

static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_display ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static inline char get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
}

static inline char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mapping ? 'M' : ' ';
}
121 
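/* Sum the sizes of all GGTT VMAs of this object that are actually bound. */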
122 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
123 {
124 	u64 size = 0;
125 	struct i915_vma *vma;
126 
127 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
128 		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
129 			size += vma->node.size;
130 	}
131 
132 	return size;
133 }
134 
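/*
 * Print a one-line summary of a GEM object. The leading flag characters
 * come from the helpers above: active ('*'), pinned for display ('p'),
 * tiling ('X'/'Y'), bound in the global GTT ('g') and kernel-mapped ('M').
 */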
135 static void
136 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
137 {
138 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
139 	struct intel_engine_cs *engine;
140 	struct i915_vma *vma;
141 	int pin_count = 0;
142 	enum intel_engine_id id;
143 
144 	lockdep_assert_held(&obj->base.dev->struct_mutex);
145 
146 	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
147 		   &obj->base,
148 		   get_active_flag(obj),
149 		   get_pin_flag(obj),
150 		   get_tiling_flag(obj),
151 		   get_global_flag(obj),
152 		   get_pin_mapped_flag(obj),
153 		   obj->base.size / 1024,
154 		   obj->base.read_domains,
155 		   obj->base.write_domain);
156 	for_each_engine_id(engine, dev_priv, id)
157 		seq_printf(m, "%x ",
158 				i915_gem_request_get_seqno(obj->last_read_req[id]));
159 	seq_printf(m, "] %x %x%s%s%s",
160 		   i915_gem_request_get_seqno(obj->last_write_req),
161 		   i915_gem_request_get_seqno(obj->last_fenced_req),
162 		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
163 		   obj->dirty ? " dirty" : "",
164 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
165 	if (obj->base.name)
166 		seq_printf(m, " (name: %d)", obj->base.name);
167 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
168 		if (vma->pin_count > 0)
169 			pin_count++;
170 	}
171 	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_puts(m, " (display)");
174 	if (obj->fence_reg != I915_FENCE_REG_NONE)
175 		seq_printf(m, " (fence: %d)", obj->fence_reg);
176 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
177 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
178 			   vma->is_ggtt ? "g" : "pp",
179 			   vma->node.start, vma->node.size);
180 		if (vma->is_ggtt)
181 			seq_printf(m, ", type: %u", vma->ggtt_view.type);
182 		seq_puts(m, ")");
183 	}
184 	if (obj->stolen)
185 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
186 	if (obj->pin_display || obj->fault_mappable) {
187 		char s[3], *t = s;
188 		if (obj->pin_display)
189 			*t++ = 'p';
190 		if (obj->fault_mappable)
191 			*t++ = 'f';
192 		*t = '\0';
193 		seq_printf(m, " (%s mappable)", s);
194 	}
195 	if (obj->last_write_req != NULL)
196 		seq_printf(m, " (%s)",
197 			   i915_gem_request_get_engine(obj->last_write_req)->name);
198 	if (obj->frontbuffer_bits)
199 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
200 }
201 
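/*
 * Terse context summary: 'I' if the legacy HW context has been initialised,
 * 'R' if the context still has slices left to remap.
 */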
202 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
203 {
204 	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
205 	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
206 	seq_putc(m, ' ');
207 }
208 
209 static int i915_gem_object_list_info(struct seq_file *m, void *data)
210 {
211 	struct drm_info_node *node = m->private;
212 	uintptr_t list = (uintptr_t) node->info_ent->data;
213 	struct list_head *head;
214 	struct drm_device *dev = node->minor->dev;
215 	struct drm_i915_private *dev_priv = to_i915(dev);
216 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
217 	struct i915_vma *vma;
218 	u64 total_obj_size, total_gtt_size;
219 	int count, ret;
220 
221 	ret = mutex_lock_interruptible(&dev->struct_mutex);
222 	if (ret)
223 		return ret;
224 
225 	/* FIXME: the user of this interface might want more than just GGTT */
226 	switch (list) {
227 	case ACTIVE_LIST:
228 		seq_puts(m, "Active:\n");
229 		head = &ggtt->base.active_list;
230 		break;
231 	case INACTIVE_LIST:
232 		seq_puts(m, "Inactive:\n");
233 		head = &ggtt->base.inactive_list;
234 		break;
235 	default:
236 		mutex_unlock(&dev->struct_mutex);
237 		return -EINVAL;
238 	}
239 
240 	total_obj_size = total_gtt_size = count = 0;
241 	list_for_each_entry(vma, head, vm_link) {
		seq_puts(m, "   ");
		describe_obj(m, vma->obj);
		seq_putc(m, '\n');
245 		total_obj_size += vma->obj->base.size;
246 		total_gtt_size += vma->node.size;
247 		count++;
248 	}
249 	mutex_unlock(&dev->struct_mutex);
250 
251 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
252 		   count, total_obj_size, total_gtt_size);
253 	return 0;
254 }
255 
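/* list_sort() comparator: order objects by their start offset within stolen memory. */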
256 static int obj_rank_by_stolen(void *priv,
257 			      struct list_head *A, struct list_head *B)
258 {
259 	struct drm_i915_gem_object *a =
260 		container_of(A, struct drm_i915_gem_object, obj_exec_link);
261 	struct drm_i915_gem_object *b =
262 		container_of(B, struct drm_i915_gem_object, obj_exec_link);
263 
264 	if (a->stolen->start < b->stolen->start)
265 		return -1;
266 	if (a->stolen->start > b->stolen->start)
267 		return 1;
268 	return 0;
269 }
270 
271 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
272 {
273 	struct drm_info_node *node = m->private;
274 	struct drm_device *dev = node->minor->dev;
275 	struct drm_i915_private *dev_priv = dev->dev_private;
276 	struct drm_i915_gem_object *obj;
277 	u64 total_obj_size, total_gtt_size;
278 	LIST_HEAD(stolen);
279 	int count, ret;
280 
281 	ret = mutex_lock_interruptible(&dev->struct_mutex);
282 	if (ret)
283 		return ret;
284 
285 	total_obj_size = total_gtt_size = count = 0;
286 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
287 		if (obj->stolen == NULL)
288 			continue;
289 
290 		list_add(&obj->obj_exec_link, &stolen);
291 
292 		total_obj_size += obj->base.size;
293 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
294 		count++;
295 	}
296 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
297 		if (obj->stolen == NULL)
298 			continue;
299 
300 		list_add(&obj->obj_exec_link, &stolen);
301 
302 		total_obj_size += obj->base.size;
303 		count++;
304 	}
305 	list_sort(NULL, &stolen, obj_rank_by_stolen);
306 	seq_puts(m, "Stolen:\n");
307 	while (!list_empty(&stolen)) {
308 		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
309 		seq_puts(m, "   ");
310 		describe_obj(m, obj);
311 		seq_putc(m, '\n');
312 		list_del_init(&obj->obj_exec_link);
313 	}
314 	mutex_unlock(&dev->struct_mutex);
315 
316 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
317 		   count, total_obj_size, total_gtt_size);
318 	return 0;
319 }
320 
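/*
 * Accumulate totals for i915_gem_object_info(): walks a list of objects,
 * summing their GGTT footprint and separately tracking the map-and-fenceable
 * subset. Relies on the caller's local size/count/mappable_* variables.
 */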
321 #define count_objects(list, member) do { \
322 	list_for_each_entry(obj, list, member) { \
323 		size += i915_gem_obj_total_ggtt_size(obj); \
324 		++count; \
325 		if (obj->map_and_fenceable) { \
326 			mappable_size += i915_gem_obj_ggtt_size(obj); \
327 			++mappable_count; \
328 		} \
329 	} \
330 } while (0)
331 
332 struct file_stats {
333 	struct drm_i915_file_private *file_priv;
334 	unsigned long count;
335 	u64 total, unbound;
336 	u64 global, shared;
337 	u64 active, inactive;
338 };
339 
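/*
 * Accumulate statistics for one object into the struct file_stats passed in
 * @data; used both directly and as an idr_for_each() callback.
 */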
340 static int per_file_stats(int id, void *ptr, void *data)
341 {
342 	struct drm_i915_gem_object *obj = ptr;
343 	struct file_stats *stats = data;
344 	struct i915_vma *vma;
345 
346 	stats->count++;
347 	stats->total += obj->base.size;
348 
349 	if (obj->base.name || obj->base.dma_buf)
350 		stats->shared += obj->base.size;
351 
352 	if (USES_FULL_PPGTT(obj->base.dev)) {
353 		list_for_each_entry(vma, &obj->vma_list, obj_link) {
354 			struct i915_hw_ppgtt *ppgtt;
355 
356 			if (!drm_mm_node_allocated(&vma->node))
357 				continue;
358 
359 			if (vma->is_ggtt) {
360 				stats->global += obj->base.size;
361 				continue;
362 			}
363 
364 			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
365 			if (ppgtt->file_priv != stats->file_priv)
366 				continue;
367 
368 			if (obj->active) /* XXX per-vma statistic */
369 				stats->active += obj->base.size;
370 			else
371 				stats->inactive += obj->base.size;
372 
373 			return 0;
374 		}
375 	} else {
376 		if (i915_gem_obj_ggtt_bound(obj)) {
377 			stats->global += obj->base.size;
378 			if (obj->active)
379 				stats->active += obj->base.size;
380 			else
381 				stats->inactive += obj->base.size;
382 			return 0;
383 		}
384 	}
385 
386 	if (!list_empty(&obj->global_list))
387 		stats->unbound += obj->base.size;
388 
389 	return 0;
390 }
391 
392 #define print_file_stats(m, name, stats) do { \
393 	if (stats.count) \
394 		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
395 			   name, \
396 			   stats.count, \
397 			   stats.total, \
398 			   stats.active, \
399 			   stats.inactive, \
400 			   stats.global, \
401 			   stats.shared, \
402 			   stats.unbound); \
403 } while (0)
404 
405 static void print_batch_pool_stats(struct seq_file *m,
406 				   struct drm_i915_private *dev_priv)
407 {
408 	struct drm_i915_gem_object *obj;
409 	struct file_stats stats;
410 	struct intel_engine_cs *engine;
411 	int j;
412 
413 	memset(&stats, 0, sizeof(stats));
414 
415 	for_each_engine(engine, dev_priv) {
416 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
417 			list_for_each_entry(obj,
418 					    &engine->batch_pool.cache_list[j],
419 					    batch_pool_link)
420 				per_file_stats(0, obj, &stats);
421 		}
422 	}
423 
424 	print_file_stats(m, "[k]batch pool", stats);
425 }
426 
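/* As count_objects() above, but walks a list of VMAs rather than objects. */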
427 #define count_vmas(list, member) do { \
428 	list_for_each_entry(vma, list, member) { \
429 		size += i915_gem_obj_total_ggtt_size(vma->obj); \
430 		++count; \
431 		if (vma->obj->map_and_fenceable) { \
432 			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
433 			++mappable_count; \
434 		} \
435 	} \
436 } while (0)
437 
438 static int i915_gem_object_info(struct seq_file *m, void* data)
439 {
440 	struct drm_info_node *node = m->private;
441 	struct drm_device *dev = node->minor->dev;
442 	struct drm_i915_private *dev_priv = to_i915(dev);
443 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
444 	u32 count, mappable_count, purgeable_count;
445 	u64 size, mappable_size, purgeable_size;
446 	unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
447 	u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
448 	struct drm_i915_gem_object *obj;
449 	struct drm_file *file;
450 	struct i915_vma *vma;
451 	int ret;
452 
453 	ret = mutex_lock_interruptible(&dev->struct_mutex);
454 	if (ret)
455 		return ret;
456 
457 	seq_printf(m, "%u objects, %zu bytes\n",
458 		   dev_priv->mm.object_count,
459 		   dev_priv->mm.object_memory);
460 
461 	size = count = mappable_size = mappable_count = 0;
462 	count_objects(&dev_priv->mm.bound_list, global_list);
463 	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
464 		   count, mappable_count, size, mappable_size);
465 
466 	size = count = mappable_size = mappable_count = 0;
467 	count_vmas(&ggtt->base.active_list, vm_link);
468 	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
469 		   count, mappable_count, size, mappable_size);
470 
471 	size = count = mappable_size = mappable_count = 0;
472 	count_vmas(&ggtt->base.inactive_list, vm_link);
473 	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
474 		   count, mappable_count, size, mappable_size);
475 
476 	size = count = purgeable_size = purgeable_count = 0;
477 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size;
		++count;
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
481 		if (obj->mapping) {
482 			pin_mapped_count++;
483 			pin_mapped_size += obj->base.size;
484 			if (obj->pages_pin_count == 0) {
485 				pin_mapped_purgeable_count++;
486 				pin_mapped_purgeable_size += obj->base.size;
487 			}
488 		}
489 	}
490 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
491 
492 	size = count = mappable_size = mappable_count = 0;
493 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
494 		if (obj->fault_mappable) {
495 			size += i915_gem_obj_ggtt_size(obj);
496 			++count;
497 		}
498 		if (obj->pin_display) {
499 			mappable_size += i915_gem_obj_ggtt_size(obj);
500 			++mappable_count;
501 		}
502 		if (obj->madv == I915_MADV_DONTNEED) {
503 			purgeable_size += obj->base.size;
504 			++purgeable_count;
505 		}
506 		if (obj->mapping) {
507 			pin_mapped_count++;
508 			pin_mapped_size += obj->base.size;
509 			if (obj->pages_pin_count == 0) {
510 				pin_mapped_purgeable_count++;
511 				pin_mapped_purgeable_size += obj->base.size;
512 			}
513 		}
514 	}
515 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
516 		   purgeable_count, purgeable_size);
517 	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
518 		   mappable_count, mappable_size);
519 	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
520 		   count, size);
521 	seq_printf(m,
522 		   "%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
523 		   pin_mapped_count, pin_mapped_purgeable_count,
524 		   pin_mapped_size, pin_mapped_purgeable_size);
525 
526 	seq_printf(m, "%llu [%llu] gtt total\n",
527 		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
528 
529 	seq_putc(m, '\n');
530 	print_batch_pool_stats(m, dev_priv);
531 
532 	mutex_unlock(&dev->struct_mutex);
533 
534 	mutex_lock(&dev->filelist_mutex);
535 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
536 		struct file_stats stats;
537 		struct task_struct *task;
538 
539 		memset(&stats, 0, sizeof(stats));
540 		stats.file_priv = file->driver_priv;
541 		spin_lock(&file->table_lock);
542 		idr_for_each(&file->object_idr, per_file_stats, &stats);
543 		spin_unlock(&file->table_lock);
544 		/*
545 		 * Although we have a valid reference on file->pid, that does
546 		 * not guarantee that the task_struct who called get_pid() is
547 		 * still alive (e.g. get_pid(current) => fork() => exit()).
548 		 * Therefore, we need to protect this ->comm access using RCU.
549 		 */
550 		rcu_read_lock();
551 		task = pid_task(file->pid, PIDTYPE_PID);
552 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
553 		rcu_read_unlock();
554 	}
555 	mutex_unlock(&dev->filelist_mutex);
556 
557 	return 0;
558 }
559 
560 static int i915_gem_gtt_info(struct seq_file *m, void *data)
561 {
562 	struct drm_info_node *node = m->private;
563 	struct drm_device *dev = node->minor->dev;
564 	uintptr_t list = (uintptr_t) node->info_ent->data;
565 	struct drm_i915_private *dev_priv = dev->dev_private;
566 	struct drm_i915_gem_object *obj;
567 	u64 total_obj_size, total_gtt_size;
568 	int count, ret;
569 
570 	ret = mutex_lock_interruptible(&dev->struct_mutex);
571 	if (ret)
572 		return ret;
573 
574 	total_obj_size = total_gtt_size = count = 0;
575 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
576 		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
577 			continue;
578 
579 		seq_puts(m, "   ");
580 		describe_obj(m, obj);
581 		seq_putc(m, '\n');
582 		total_obj_size += obj->base.size;
583 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
584 		count++;
585 	}
586 
587 	mutex_unlock(&dev->struct_mutex);
588 
589 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
590 		   count, total_obj_size, total_gtt_size);
591 
592 	return 0;
593 }
594 
595 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
596 {
597 	struct drm_info_node *node = m->private;
598 	struct drm_device *dev = node->minor->dev;
599 	struct drm_i915_private *dev_priv = dev->dev_private;
600 	struct intel_crtc *crtc;
601 	int ret;
602 
603 	ret = mutex_lock_interruptible(&dev->struct_mutex);
604 	if (ret)
605 		return ret;
606 
607 	for_each_intel_crtc(dev, crtc) {
608 		const char pipe = pipe_name(crtc->pipe);
609 		const char plane = plane_name(crtc->plane);
610 		struct intel_unpin_work *work;
611 
612 		spin_lock_irq(&dev->event_lock);
613 		work = crtc->unpin_work;
614 		if (work == NULL) {
615 			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
616 				   pipe, plane);
617 		} else {
618 			u32 addr;
619 
620 			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
621 				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
622 					   pipe, plane);
623 			} else {
624 				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
625 					   pipe, plane);
626 			}
627 			if (work->flip_queued_req) {
628 				struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req);
629 
630 				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
631 					   engine->name,
632 					   i915_gem_request_get_seqno(work->flip_queued_req),
633 					   dev_priv->next_seqno,
634 					   engine->get_seqno(engine),
635 					   i915_gem_request_completed(work->flip_queued_req, true));
			} else {
				seq_puts(m, "Flip not associated with any ring\n");
			}
638 			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
639 				   work->flip_queued_vblank,
640 				   work->flip_ready_vblank,
641 				   drm_crtc_vblank_count(&crtc->base));
642 			if (work->enable_stall_check)
643 				seq_puts(m, "Stall check enabled, ");
644 			else
645 				seq_puts(m, "Stall check waiting for page flip ioctl, ");
646 			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
647 
648 			if (INTEL_INFO(dev)->gen >= 4)
649 				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
650 			else
651 				addr = I915_READ(DSPADDR(crtc->plane));
652 			seq_printf(m, "Current scanout address 0x%08x\n", addr);
653 
654 			if (work->pending_flip_obj) {
655 				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
656 				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
657 			}
658 		}
659 		spin_unlock_irq(&dev->event_lock);
660 	}
661 
662 	mutex_unlock(&dev->struct_mutex);
663 
664 	return 0;
665 }
666 
667 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
668 {
669 	struct drm_info_node *node = m->private;
670 	struct drm_device *dev = node->minor->dev;
671 	struct drm_i915_private *dev_priv = dev->dev_private;
672 	struct drm_i915_gem_object *obj;
673 	struct intel_engine_cs *engine;
674 	int total = 0;
675 	int ret, j;
676 
677 	ret = mutex_lock_interruptible(&dev->struct_mutex);
678 	if (ret)
679 		return ret;
680 
681 	for_each_engine(engine, dev_priv) {
682 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
683 			int count;
684 
685 			count = 0;
686 			list_for_each_entry(obj,
687 					    &engine->batch_pool.cache_list[j],
688 					    batch_pool_link)
689 				count++;
690 			seq_printf(m, "%s cache[%d]: %d objects\n",
691 				   engine->name, j, count);
692 
693 			list_for_each_entry(obj,
694 					    &engine->batch_pool.cache_list[j],
695 					    batch_pool_link) {
696 				seq_puts(m, "   ");
697 				describe_obj(m, obj);
698 				seq_putc(m, '\n');
699 			}
700 
701 			total += count;
702 		}
703 	}
704 
705 	seq_printf(m, "total: %d\n", total);
706 
707 	mutex_unlock(&dev->struct_mutex);
708 
709 	return 0;
710 }
711 
712 static int i915_gem_request_info(struct seq_file *m, void *data)
713 {
714 	struct drm_info_node *node = m->private;
715 	struct drm_device *dev = node->minor->dev;
716 	struct drm_i915_private *dev_priv = dev->dev_private;
717 	struct intel_engine_cs *engine;
718 	struct drm_i915_gem_request *req;
719 	int ret, any;
720 
721 	ret = mutex_lock_interruptible(&dev->struct_mutex);
722 	if (ret)
723 		return ret;
724 
725 	any = 0;
726 	for_each_engine(engine, dev_priv) {
727 		int count;
728 
729 		count = 0;
730 		list_for_each_entry(req, &engine->request_list, list)
731 			count++;
732 		if (count == 0)
733 			continue;
734 
735 		seq_printf(m, "%s requests: %d\n", engine->name, count);
736 		list_for_each_entry(req, &engine->request_list, list) {
737 			struct task_struct *task;
738 
739 			rcu_read_lock();
740 			task = NULL;
741 			if (req->pid)
742 				task = pid_task(req->pid, PIDTYPE_PID);
743 			seq_printf(m, "    %x @ %d: %s [%d]\n",
744 				   req->seqno,
745 				   (int) (jiffies - req->emitted_jiffies),
746 				   task ? task->comm : "<unknown>",
747 				   task ? task->pid : -1);
748 			rcu_read_unlock();
749 		}
750 
751 		any++;
752 	}
753 	mutex_unlock(&dev->struct_mutex);
754 
755 	if (any == 0)
756 		seq_puts(m, "No requests\n");
757 
758 	return 0;
759 }
760 
761 static void i915_ring_seqno_info(struct seq_file *m,
762 				 struct intel_engine_cs *engine)
763 {
764 	seq_printf(m, "Current sequence (%s): %x\n",
765 		   engine->name, engine->get_seqno(engine));
766 	seq_printf(m, "Current user interrupts (%s): %x\n",
767 		   engine->name, READ_ONCE(engine->user_interrupts));
768 }
769 
770 static int i915_gem_seqno_info(struct seq_file *m, void *data)
771 {
772 	struct drm_info_node *node = m->private;
773 	struct drm_device *dev = node->minor->dev;
774 	struct drm_i915_private *dev_priv = dev->dev_private;
775 	struct intel_engine_cs *engine;
776 	int ret;
777 
778 	ret = mutex_lock_interruptible(&dev->struct_mutex);
779 	if (ret)
780 		return ret;
781 	intel_runtime_pm_get(dev_priv);
782 
783 	for_each_engine(engine, dev_priv)
784 		i915_ring_seqno_info(m, engine);
785 
786 	intel_runtime_pm_put(dev_priv);
787 	mutex_unlock(&dev->struct_mutex);
788 
789 	return 0;
790 }
791 
793 static int i915_interrupt_info(struct seq_file *m, void *data)
794 {
795 	struct drm_info_node *node = m->private;
796 	struct drm_device *dev = node->minor->dev;
797 	struct drm_i915_private *dev_priv = dev->dev_private;
798 	struct intel_engine_cs *engine;
799 	int ret, i, pipe;
800 
801 	ret = mutex_lock_interruptible(&dev->struct_mutex);
802 	if (ret)
803 		return ret;
804 	intel_runtime_pm_get(dev_priv);
805 
806 	if (IS_CHERRYVIEW(dev)) {
807 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
808 			   I915_READ(GEN8_MASTER_IRQ));
809 
810 		seq_printf(m, "Display IER:\t%08x\n",
811 			   I915_READ(VLV_IER));
812 		seq_printf(m, "Display IIR:\t%08x\n",
813 			   I915_READ(VLV_IIR));
814 		seq_printf(m, "Display IIR_RW:\t%08x\n",
815 			   I915_READ(VLV_IIR_RW));
816 		seq_printf(m, "Display IMR:\t%08x\n",
817 			   I915_READ(VLV_IMR));
818 		for_each_pipe(dev_priv, pipe)
819 			seq_printf(m, "Pipe %c stat:\t%08x\n",
820 				   pipe_name(pipe),
821 				   I915_READ(PIPESTAT(pipe)));
822 
823 		seq_printf(m, "Port hotplug:\t%08x\n",
824 			   I915_READ(PORT_HOTPLUG_EN));
825 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
826 			   I915_READ(VLV_DPFLIPSTAT));
827 		seq_printf(m, "DPINVGTT:\t%08x\n",
828 			   I915_READ(DPINVGTT));
829 
830 		for (i = 0; i < 4; i++) {
831 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
832 				   i, I915_READ(GEN8_GT_IMR(i)));
833 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
834 				   i, I915_READ(GEN8_GT_IIR(i)));
835 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
836 				   i, I915_READ(GEN8_GT_IER(i)));
837 		}
838 
839 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
840 			   I915_READ(GEN8_PCU_IMR));
841 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
842 			   I915_READ(GEN8_PCU_IIR));
843 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
844 			   I915_READ(GEN8_PCU_IER));
845 	} else if (INTEL_INFO(dev)->gen >= 8) {
846 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
847 			   I915_READ(GEN8_MASTER_IRQ));
848 
849 		for (i = 0; i < 4; i++) {
850 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
851 				   i, I915_READ(GEN8_GT_IMR(i)));
852 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
853 				   i, I915_READ(GEN8_GT_IIR(i)));
854 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
855 				   i, I915_READ(GEN8_GT_IER(i)));
856 		}
857 
858 		for_each_pipe(dev_priv, pipe) {
859 			enum intel_display_power_domain power_domain;
860 
861 			power_domain = POWER_DOMAIN_PIPE(pipe);
862 			if (!intel_display_power_get_if_enabled(dev_priv,
863 								power_domain)) {
864 				seq_printf(m, "Pipe %c power disabled\n",
865 					   pipe_name(pipe));
866 				continue;
867 			}
868 			seq_printf(m, "Pipe %c IMR:\t%08x\n",
869 				   pipe_name(pipe),
870 				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
871 			seq_printf(m, "Pipe %c IIR:\t%08x\n",
872 				   pipe_name(pipe),
873 				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
874 			seq_printf(m, "Pipe %c IER:\t%08x\n",
875 				   pipe_name(pipe),
876 				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
877 
878 			intel_display_power_put(dev_priv, power_domain);
879 		}
880 
881 		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
882 			   I915_READ(GEN8_DE_PORT_IMR));
883 		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
884 			   I915_READ(GEN8_DE_PORT_IIR));
885 		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
886 			   I915_READ(GEN8_DE_PORT_IER));
887 
888 		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
889 			   I915_READ(GEN8_DE_MISC_IMR));
890 		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
891 			   I915_READ(GEN8_DE_MISC_IIR));
892 		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
893 			   I915_READ(GEN8_DE_MISC_IER));
894 
895 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
896 			   I915_READ(GEN8_PCU_IMR));
897 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
898 			   I915_READ(GEN8_PCU_IIR));
899 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
900 			   I915_READ(GEN8_PCU_IER));
901 	} else if (IS_VALLEYVIEW(dev)) {
902 		seq_printf(m, "Display IER:\t%08x\n",
903 			   I915_READ(VLV_IER));
904 		seq_printf(m, "Display IIR:\t%08x\n",
905 			   I915_READ(VLV_IIR));
906 		seq_printf(m, "Display IIR_RW:\t%08x\n",
907 			   I915_READ(VLV_IIR_RW));
908 		seq_printf(m, "Display IMR:\t%08x\n",
909 			   I915_READ(VLV_IMR));
910 		for_each_pipe(dev_priv, pipe)
911 			seq_printf(m, "Pipe %c stat:\t%08x\n",
912 				   pipe_name(pipe),
913 				   I915_READ(PIPESTAT(pipe)));
914 
915 		seq_printf(m, "Master IER:\t%08x\n",
916 			   I915_READ(VLV_MASTER_IER));
917 
918 		seq_printf(m, "Render IER:\t%08x\n",
919 			   I915_READ(GTIER));
920 		seq_printf(m, "Render IIR:\t%08x\n",
921 			   I915_READ(GTIIR));
922 		seq_printf(m, "Render IMR:\t%08x\n",
923 			   I915_READ(GTIMR));
924 
925 		seq_printf(m, "PM IER:\t\t%08x\n",
926 			   I915_READ(GEN6_PMIER));
927 		seq_printf(m, "PM IIR:\t\t%08x\n",
928 			   I915_READ(GEN6_PMIIR));
929 		seq_printf(m, "PM IMR:\t\t%08x\n",
930 			   I915_READ(GEN6_PMIMR));
931 
932 		seq_printf(m, "Port hotplug:\t%08x\n",
933 			   I915_READ(PORT_HOTPLUG_EN));
934 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
935 			   I915_READ(VLV_DPFLIPSTAT));
936 		seq_printf(m, "DPINVGTT:\t%08x\n",
937 			   I915_READ(DPINVGTT));
938 
939 	} else if (!HAS_PCH_SPLIT(dev)) {
940 		seq_printf(m, "Interrupt enable:    %08x\n",
941 			   I915_READ(IER));
942 		seq_printf(m, "Interrupt identity:  %08x\n",
943 			   I915_READ(IIR));
944 		seq_printf(m, "Interrupt mask:      %08x\n",
945 			   I915_READ(IMR));
946 		for_each_pipe(dev_priv, pipe)
947 			seq_printf(m, "Pipe %c stat:         %08x\n",
948 				   pipe_name(pipe),
949 				   I915_READ(PIPESTAT(pipe)));
950 	} else {
951 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
952 			   I915_READ(DEIER));
953 		seq_printf(m, "North Display Interrupt identity:	%08x\n",
954 			   I915_READ(DEIIR));
955 		seq_printf(m, "North Display Interrupt mask:		%08x\n",
956 			   I915_READ(DEIMR));
957 		seq_printf(m, "South Display Interrupt enable:		%08x\n",
958 			   I915_READ(SDEIER));
959 		seq_printf(m, "South Display Interrupt identity:	%08x\n",
960 			   I915_READ(SDEIIR));
961 		seq_printf(m, "South Display Interrupt mask:		%08x\n",
962 			   I915_READ(SDEIMR));
963 		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
964 			   I915_READ(GTIER));
965 		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
966 			   I915_READ(GTIIR));
967 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
968 			   I915_READ(GTIMR));
969 	}
970 	for_each_engine(engine, dev_priv) {
971 		if (INTEL_INFO(dev)->gen >= 6) {
972 			seq_printf(m,
973 				   "Graphics Interrupt mask (%s):	%08x\n",
974 				   engine->name, I915_READ_IMR(engine));
975 		}
976 		i915_ring_seqno_info(m, engine);
977 	}
978 	intel_runtime_pm_put(dev_priv);
979 	mutex_unlock(&dev->struct_mutex);
980 
981 	return 0;
982 }
983 
984 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
985 {
986 	struct drm_info_node *node = m->private;
987 	struct drm_device *dev = node->minor->dev;
988 	struct drm_i915_private *dev_priv = dev->dev_private;
989 	int i, ret;
990 
991 	ret = mutex_lock_interruptible(&dev->struct_mutex);
992 	if (ret)
993 		return ret;
994 
995 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
996 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
997 		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
998 
999 		seq_printf(m, "Fence %d, pin count = %d, object = ",
1000 			   i, dev_priv->fence_regs[i].pin_count);
1001 		if (obj == NULL)
1002 			seq_puts(m, "unused");
1003 		else
1004 			describe_obj(m, obj);
1005 		seq_putc(m, '\n');
1006 	}
1007 
1008 	mutex_unlock(&dev->struct_mutex);
1009 	return 0;
1010 }
1011 
1012 static int i915_hws_info(struct seq_file *m, void *data)
1013 {
1014 	struct drm_info_node *node = m->private;
1015 	struct drm_device *dev = node->minor->dev;
1016 	struct drm_i915_private *dev_priv = dev->dev_private;
1017 	struct intel_engine_cs *engine;
1018 	const u32 *hws;
1019 	int i;
1020 
1021 	engine = &dev_priv->engine[(uintptr_t)node->info_ent->data];
1022 	hws = engine->status_page.page_addr;
1023 	if (hws == NULL)
1024 		return 0;
1025 
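	/* Dump the first 1KiB (256 dwords) of the 4KiB status page, four per line. */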
1026 	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
1027 		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
1028 			   i * 4,
1029 			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
1030 	}
1031 	return 0;
1032 }
1033 
1034 static ssize_t
1035 i915_error_state_write(struct file *filp,
1036 		       const char __user *ubuf,
1037 		       size_t cnt,
1038 		       loff_t *ppos)
1039 {
1040 	struct i915_error_state_file_priv *error_priv = filp->private_data;
1041 	struct drm_device *dev = error_priv->dev;
1042 	int ret;
1043 
1044 	DRM_DEBUG_DRIVER("Resetting error state\n");
1045 
1046 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1047 	if (ret)
1048 		return ret;
1049 
1050 	i915_destroy_error_state(dev);
1051 	mutex_unlock(&dev->struct_mutex);
1052 
1053 	return cnt;
1054 }
1055 
1056 static int i915_error_state_open(struct inode *inode, struct file *file)
1057 {
1058 	struct drm_device *dev = inode->i_private;
1059 	struct i915_error_state_file_priv *error_priv;
1060 
1061 	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
1062 	if (!error_priv)
1063 		return -ENOMEM;
1064 
1065 	error_priv->dev = dev;
1066 
1067 	i915_error_state_get(dev, error_priv);
1068 
1069 	file->private_data = error_priv;
1070 
1071 	return 0;
1072 }
1073 
1074 static int i915_error_state_release(struct inode *inode, struct file *file)
1075 {
1076 	struct i915_error_state_file_priv *error_priv = file->private_data;
1077 
1078 	i915_error_state_put(error_priv);
1079 	kfree(error_priv);
1080 
1081 	return 0;
1082 }
1083 
1084 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
1085 				     size_t count, loff_t *pos)
1086 {
1087 	struct i915_error_state_file_priv *error_priv = file->private_data;
1088 	struct drm_i915_error_state_buf error_str;
1089 	loff_t tmp_pos = 0;
1090 	ssize_t ret_count = 0;
1091 	int ret;
1092 
1093 	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
1094 	if (ret)
1095 		return ret;
1096 
1097 	ret = i915_error_state_to_str(&error_str, error_priv);
1098 	if (ret)
1099 		goto out;
1100 
1101 	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
1102 					    error_str.buf,
1103 					    error_str.bytes);
1104 
1105 	if (ret_count < 0)
1106 		ret = ret_count;
1107 	else
1108 		*pos = error_str.start + ret_count;
1109 out:
1110 	i915_error_state_buf_release(&error_str);
1111 	return ret ?: ret_count;
1112 }
1113 
1114 static const struct file_operations i915_error_state_fops = {
1115 	.owner = THIS_MODULE,
1116 	.open = i915_error_state_open,
1117 	.read = i915_error_state_read,
1118 	.write = i915_error_state_write,
1119 	.llseek = default_llseek,
1120 	.release = i915_error_state_release,
1121 };
1122 
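/* debugfs file exposing dev_priv->next_seqno; writing it calls i915_gem_set_seqno(). */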
1123 static int
1124 i915_next_seqno_get(void *data, u64 *val)
1125 {
1126 	struct drm_device *dev = data;
1127 	struct drm_i915_private *dev_priv = dev->dev_private;
1128 	int ret;
1129 
1130 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1131 	if (ret)
1132 		return ret;
1133 
1134 	*val = dev_priv->next_seqno;
1135 	mutex_unlock(&dev->struct_mutex);
1136 
1137 	return 0;
1138 }
1139 
1140 static int
1141 i915_next_seqno_set(void *data, u64 val)
1142 {
1143 	struct drm_device *dev = data;
1144 	int ret;
1145 
1146 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1147 	if (ret)
1148 		return ret;
1149 
1150 	ret = i915_gem_set_seqno(dev, val);
1151 	mutex_unlock(&dev->struct_mutex);
1152 
1153 	return ret;
1154 }
1155 
1156 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1157 			i915_next_seqno_get, i915_next_seqno_set,
1158 			"0x%llx\n");
1159 
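/*
 * Report the current GPU frequency/P-state information, using the
 * platform-appropriate registers (ILK MEMSWCTL, VLV/CHV punit, or gen6+ RPS).
 */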
1160 static int i915_frequency_info(struct seq_file *m, void *unused)
1161 {
1162 	struct drm_info_node *node = m->private;
1163 	struct drm_device *dev = node->minor->dev;
1164 	struct drm_i915_private *dev_priv = dev->dev_private;
1165 	int ret = 0;
1166 
1167 	intel_runtime_pm_get(dev_priv);
1168 
1169 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1170 
1171 	if (IS_GEN5(dev)) {
1172 		u16 rgvswctl = I915_READ16(MEMSWCTL);
1173 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1174 
1175 		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1176 		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1177 		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1178 			   MEMSTAT_VID_SHIFT);
1179 		seq_printf(m, "Current P-state: %d\n",
1180 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1181 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1182 		u32 freq_sts;
1183 
1184 		mutex_lock(&dev_priv->rps.hw_lock);
1185 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1186 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1187 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1188 
1189 		seq_printf(m, "actual GPU freq: %d MHz\n",
1190 			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1191 
1192 		seq_printf(m, "current GPU freq: %d MHz\n",
1193 			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1194 
1195 		seq_printf(m, "max GPU freq: %d MHz\n",
1196 			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1197 
1198 		seq_printf(m, "min GPU freq: %d MHz\n",
1199 			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1200 
1201 		seq_printf(m, "idle GPU freq: %d MHz\n",
1202 			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1203 
1204 		seq_printf(m,
1205 			   "efficient (RPe) frequency: %d MHz\n",
1206 			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1207 		mutex_unlock(&dev_priv->rps.hw_lock);
1208 	} else if (INTEL_INFO(dev)->gen >= 6) {
1209 		u32 rp_state_limits;
1210 		u32 gt_perf_status;
1211 		u32 rp_state_cap;
1212 		u32 rpmodectl, rpinclimit, rpdeclimit;
1213 		u32 rpstat, cagf, reqf;
1214 		u32 rpupei, rpcurup, rpprevup;
1215 		u32 rpdownei, rpcurdown, rpprevdown;
1216 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1217 		int max_freq;
1218 
1219 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1220 		if (IS_BROXTON(dev)) {
1221 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1222 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1223 		} else {
1224 			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1225 			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1226 		}
1227 
1228 		/* RPSTAT1 is in the GT power well */
1229 		ret = mutex_lock_interruptible(&dev->struct_mutex);
1230 		if (ret)
1231 			goto out;
1232 
1233 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1234 
1235 		reqf = I915_READ(GEN6_RPNSWREQ);
1236 		if (IS_GEN9(dev))
1237 			reqf >>= 23;
1238 		else {
1239 			reqf &= ~GEN6_TURBO_DISABLE;
1240 			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1241 				reqf >>= 24;
1242 			else
1243 				reqf >>= 25;
1244 		}
1245 		reqf = intel_gpu_freq(dev_priv, reqf);
1246 
1247 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1248 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1249 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1250 
1251 		rpstat = I915_READ(GEN6_RPSTAT1);
1252 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1253 		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1254 		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1255 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1256 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1257 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
1258 		if (IS_GEN9(dev))
1259 			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
1260 		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1261 			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1262 		else
1263 			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
1264 		cagf = intel_gpu_freq(dev_priv, cagf);
1265 
1266 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1267 		mutex_unlock(&dev->struct_mutex);
1268 
1269 		if (IS_GEN6(dev) || IS_GEN7(dev)) {
1270 			pm_ier = I915_READ(GEN6_PMIER);
1271 			pm_imr = I915_READ(GEN6_PMIMR);
1272 			pm_isr = I915_READ(GEN6_PMISR);
1273 			pm_iir = I915_READ(GEN6_PMIIR);
1274 			pm_mask = I915_READ(GEN6_PMINTRMSK);
1275 		} else {
1276 			pm_ier = I915_READ(GEN8_GT_IER(2));
1277 			pm_imr = I915_READ(GEN8_GT_IMR(2));
1278 			pm_isr = I915_READ(GEN8_GT_ISR(2));
1279 			pm_iir = I915_READ(GEN8_GT_IIR(2));
1280 			pm_mask = I915_READ(GEN6_PMINTRMSK);
1281 		}
1282 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1283 			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1284 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1285 		seq_printf(m, "Render p-state ratio: %d\n",
1286 			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
1287 		seq_printf(m, "Render p-state VID: %d\n",
1288 			   gt_perf_status & 0xff);
1289 		seq_printf(m, "Render p-state limit: %d\n",
1290 			   rp_state_limits & 0xff);
1291 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1292 		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1293 		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1294 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1295 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1296 		seq_printf(m, "CAGF: %dMHz\n", cagf);
1297 		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1298 			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1299 		seq_printf(m, "RP CUR UP: %d (%dus)\n",
1300 			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1301 		seq_printf(m, "RP PREV UP: %d (%dus)\n",
1302 			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
1303 		seq_printf(m, "Up threshold: %d%%\n",
1304 			   dev_priv->rps.up_threshold);
1305 
1306 		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1307 			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1308 		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1309 			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1310 		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1311 			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
1312 		seq_printf(m, "Down threshold: %d%%\n",
1313 			   dev_priv->rps.down_threshold);
1314 
1315 		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
1316 			    rp_state_cap >> 16) & 0xff;
1317 		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1318 			     GEN9_FREQ_SCALER : 1);
1319 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1320 			   intel_gpu_freq(dev_priv, max_freq));
1321 
1322 		max_freq = (rp_state_cap & 0xff00) >> 8;
1323 		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1324 			     GEN9_FREQ_SCALER : 1);
1325 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1326 			   intel_gpu_freq(dev_priv, max_freq));
1327 
1328 		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
1329 			    rp_state_cap >> 0) & 0xff;
1330 		max_freq *= (IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1331 			     GEN9_FREQ_SCALER : 1);
1332 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1333 			   intel_gpu_freq(dev_priv, max_freq));
1334 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1335 			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1336 
1337 		seq_printf(m, "Current freq: %d MHz\n",
1338 			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1339 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
1340 		seq_printf(m, "Idle freq: %d MHz\n",
1341 			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1342 		seq_printf(m, "Min freq: %d MHz\n",
1343 			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1344 		seq_printf(m, "Max freq: %d MHz\n",
1345 			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1346 		seq_printf(m,
1347 			   "efficient (RPe) frequency: %d MHz\n",
1348 			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1349 	} else {
1350 		seq_puts(m, "no P-state info available\n");
1351 	}
1352 
1353 	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
1354 	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1355 	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1356 
1357 out:
1358 	intel_runtime_pm_put(dev_priv);
1359 	return ret;
1360 }
1361 
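/* Dump the per-engine hangcheck bookkeeping alongside the current hardware state. */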
1362 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1363 {
1364 	struct drm_info_node *node = m->private;
1365 	struct drm_device *dev = node->minor->dev;
1366 	struct drm_i915_private *dev_priv = dev->dev_private;
1367 	struct intel_engine_cs *engine;
1368 	u64 acthd[I915_NUM_ENGINES];
1369 	u32 seqno[I915_NUM_ENGINES];
1370 	u32 instdone[I915_NUM_INSTDONE_REG];
1371 	enum intel_engine_id id;
1372 	int j;
1373 
1374 	if (!i915.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
1376 		return 0;
1377 	}
1378 
1379 	intel_runtime_pm_get(dev_priv);
1380 
1381 	for_each_engine_id(engine, dev_priv, id) {
1382 		acthd[id] = intel_ring_get_active_head(engine);
1383 		seqno[id] = engine->get_seqno(engine);
1384 	}
1385 
1386 	i915_get_extra_instdone(dev, instdone);
1387 
1388 	intel_runtime_pm_put(dev_priv);
1389 
	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
		seq_printf(m, "Hangcheck active, fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	} else {
		seq_puts(m, "Hangcheck inactive\n");
	}
1396 
1397 	for_each_engine_id(engine, dev_priv, id) {
1398 		seq_printf(m, "%s:\n", engine->name);
1399 		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
1400 			   engine->hangcheck.seqno,
1401 			   seqno[id],
1402 			   engine->last_submitted_seqno);
1403 		seq_printf(m, "\tuser interrupts = %x [current %x]\n",
1404 			   engine->hangcheck.user_interrupts,
1405 			   READ_ONCE(engine->user_interrupts));
1406 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1407 			   (long long)engine->hangcheck.acthd,
1408 			   (long long)acthd[id]);
1409 		seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
1410 		seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
1411 
1412 		if (engine->id == RCS) {
1413 			seq_puts(m, "\tinstdone read =");
1414 
1415 			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
1416 				seq_printf(m, " 0x%08x", instdone[j]);
1417 
1418 			seq_puts(m, "\n\tinstdone accu =");
1419 
1420 			for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
1421 				seq_printf(m, " 0x%08x",
1422 					   engine->hangcheck.instdone[j]);
1423 
1424 			seq_puts(m, "\n");
1425 		}
1426 	}
1427 
1428 	return 0;
1429 }
1430 
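/*
 * Report Ironlake render-standby (DRPC) state from MEMMODECTL, RSTDBYCTL and
 * CRSTANDVID; the VLV/CHV and gen6+ variants below report the RC6 registers
 * instead.
 */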
1431 static int ironlake_drpc_info(struct seq_file *m)
1432 {
1433 	struct drm_info_node *node = m->private;
1434 	struct drm_device *dev = node->minor->dev;
1435 	struct drm_i915_private *dev_priv = dev->dev_private;
1436 	u32 rgvmodectl, rstdbyctl;
1437 	u16 crstandvid;
1438 	int ret;
1439 
1440 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1441 	if (ret)
1442 		return ret;
1443 	intel_runtime_pm_get(dev_priv);
1444 
1445 	rgvmodectl = I915_READ(MEMMODECTL);
1446 	rstdbyctl = I915_READ(RSTDBYCTL);
1447 	crstandvid = I915_READ16(CRSTANDVID);
1448 
1449 	intel_runtime_pm_put(dev_priv);
1450 	mutex_unlock(&dev->struct_mutex);
1451 
1452 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1453 	seq_printf(m, "Boost freq: %d\n",
1454 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1455 		   MEMMODE_BOOST_FREQ_SHIFT);
1456 	seq_printf(m, "HW control enabled: %s\n",
1457 		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1458 	seq_printf(m, "SW control enabled: %s\n",
1459 		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1460 	seq_printf(m, "Gated voltage change: %s\n",
1461 		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1462 	seq_printf(m, "Starting frequency: P%d\n",
1463 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1464 	seq_printf(m, "Max P-state: P%d\n",
1465 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1466 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1467 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1468 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1469 	seq_printf(m, "Render standby enabled: %s\n",
1470 		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1471 	seq_puts(m, "Current RS state: ");
1472 	switch (rstdbyctl & RSX_STATUS_MASK) {
1473 	case RSX_STATUS_ON:
1474 		seq_puts(m, "on\n");
1475 		break;
1476 	case RSX_STATUS_RC1:
1477 		seq_puts(m, "RC1\n");
1478 		break;
1479 	case RSX_STATUS_RC1E:
1480 		seq_puts(m, "RC1E\n");
1481 		break;
1482 	case RSX_STATUS_RS1:
1483 		seq_puts(m, "RS1\n");
1484 		break;
1485 	case RSX_STATUS_RS2:
1486 		seq_puts(m, "RS2 (RC6)\n");
1487 		break;
1488 	case RSX_STATUS_RS3:
		seq_puts(m, "RS3 (RC6+)\n");
1490 		break;
1491 	default:
1492 		seq_puts(m, "unknown\n");
1493 		break;
1494 	}
1495 
1496 	return 0;
1497 }
1498 
1499 static int i915_forcewake_domains(struct seq_file *m, void *data)
1500 {
1501 	struct drm_info_node *node = m->private;
1502 	struct drm_device *dev = node->minor->dev;
1503 	struct drm_i915_private *dev_priv = dev->dev_private;
1504 	struct intel_uncore_forcewake_domain *fw_domain;
1505 
1506 	spin_lock_irq(&dev_priv->uncore.lock);
1507 	for_each_fw_domain(fw_domain, dev_priv) {
1508 		seq_printf(m, "%s.wake_count = %u\n",
1509 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
1510 			   fw_domain->wake_count);
1511 	}
1512 	spin_unlock_irq(&dev_priv->uncore.lock);
1513 
1514 	return 0;
1515 }
1516 
1517 static int vlv_drpc_info(struct seq_file *m)
1518 {
1519 	struct drm_info_node *node = m->private;
1520 	struct drm_device *dev = node->minor->dev;
1521 	struct drm_i915_private *dev_priv = dev->dev_private;
1522 	u32 rpmodectl1, rcctl1, pw_status;
1523 
1524 	intel_runtime_pm_get(dev_priv);
1525 
1526 	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1527 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1528 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1529 
1530 	intel_runtime_pm_put(dev_priv);
1531 
1532 	seq_printf(m, "Video Turbo Mode: %s\n",
1533 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1534 	seq_printf(m, "Turbo enabled: %s\n",
1535 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1536 	seq_printf(m, "HW control enabled: %s\n",
1537 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1538 	seq_printf(m, "SW control enabled: %s\n",
1539 		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1540 			  GEN6_RP_MEDIA_SW_MODE));
1541 	seq_printf(m, "RC6 Enabled: %s\n",
1542 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1543 					GEN6_RC_CTL_EI_MODE(1))));
1544 	seq_printf(m, "Render Power Well: %s\n",
1545 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1546 	seq_printf(m, "Media Power Well: %s\n",
1547 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1548 
1549 	seq_printf(m, "Render RC6 residency since boot: %u\n",
1550 		   I915_READ(VLV_GT_RENDER_RC6));
1551 	seq_printf(m, "Media RC6 residency since boot: %u\n",
1552 		   I915_READ(VLV_GT_MEDIA_RC6));
1553 
1554 	return i915_forcewake_domains(m, NULL);
1555 }
1556 
1557 static int gen6_drpc_info(struct seq_file *m)
1558 {
1559 	struct drm_info_node *node = m->private;
1560 	struct drm_device *dev = node->minor->dev;
1561 	struct drm_i915_private *dev_priv = dev->dev_private;
1562 	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1563 	unsigned forcewake_count;
1564 	int count = 0, ret;
1565 
1566 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1567 	if (ret)
1568 		return ret;
1569 	intel_runtime_pm_get(dev_priv);
1570 
1571 	spin_lock_irq(&dev_priv->uncore.lock);
1572 	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
1573 	spin_unlock_irq(&dev_priv->uncore.lock);
1574 
1575 	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
1578 	} else {
1579 		/* NB: we cannot use forcewake, else we read the wrong values */
1580 		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1581 			udelay(10);
1582 		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1583 	}
1584 
1585 	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1586 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1587 
1588 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1589 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1590 	mutex_unlock(&dev->struct_mutex);
1591 	mutex_lock(&dev_priv->rps.hw_lock);
1592 	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1593 	mutex_unlock(&dev_priv->rps.hw_lock);
1594 
1595 	intel_runtime_pm_put(dev_priv);
1596 
1597 	seq_printf(m, "Video Turbo Mode: %s\n",
1598 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1599 	seq_printf(m, "HW control enabled: %s\n",
1600 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1601 	seq_printf(m, "SW control enabled: %s\n",
1602 		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1603 			  GEN6_RP_MEDIA_SW_MODE));
1604 	seq_printf(m, "RC1e Enabled: %s\n",
1605 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1606 	seq_printf(m, "RC6 Enabled: %s\n",
1607 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1608 	seq_printf(m, "Deep RC6 Enabled: %s\n",
1609 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1610 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1611 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1612 	seq_puts(m, "Current RC state: ");
1613 	switch (gt_core_status & GEN6_RCn_MASK) {
1614 	case GEN6_RC0:
1615 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1616 			seq_puts(m, "Core Power Down\n");
1617 		else
1618 			seq_puts(m, "on\n");
1619 		break;
1620 	case GEN6_RC3:
1621 		seq_puts(m, "RC3\n");
1622 		break;
1623 	case GEN6_RC6:
1624 		seq_puts(m, "RC6\n");
1625 		break;
1626 	case GEN6_RC7:
1627 		seq_puts(m, "RC7\n");
1628 		break;
1629 	default:
1630 		seq_puts(m, "Unknown\n");
1631 		break;
1632 	}
1633 
1634 	seq_printf(m, "Core Power Down: %s\n",
1635 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1636 
1637 	/* Not exactly sure what this is */
1638 	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1639 		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1640 	seq_printf(m, "RC6 residency since boot: %u\n",
1641 		   I915_READ(GEN6_GT_GFX_RC6));
1642 	seq_printf(m, "RC6+ residency since boot: %u\n",
1643 		   I915_READ(GEN6_GT_GFX_RC6p));
1644 	seq_printf(m, "RC6++ residency since boot: %u\n",
1645 		   I915_READ(GEN6_GT_GFX_RC6pp));
1646 
1647 	seq_printf(m, "RC6   voltage: %dmV\n",
1648 		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1649 	seq_printf(m, "RC6+  voltage: %dmV\n",
1650 		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1651 	seq_printf(m, "RC6++ voltage: %dmV\n",
1652 		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1653 	return 0;
1654 }
1655 
1656 static int i915_drpc_info(struct seq_file *m, void *unused)
1657 {
1658 	struct drm_info_node *node = m->private;
1659 	struct drm_device *dev = node->minor->dev;
1660 
1661 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
1662 		return vlv_drpc_info(m);
1663 	else if (INTEL_INFO(dev)->gen >= 6)
1664 		return gen6_drpc_info(m);
1665 	else
1666 		return ironlake_drpc_info(m);
1667 }
1668 
1669 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1670 {
1671 	struct drm_info_node *node = m->private;
1672 	struct drm_device *dev = node->minor->dev;
1673 	struct drm_i915_private *dev_priv = dev->dev_private;
1674 
1675 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1676 		   dev_priv->fb_tracking.busy_bits);
1677 
1678 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1679 		   dev_priv->fb_tracking.flip_bits);
1680 
1681 	return 0;
1682 }
1683 
1684 static int i915_fbc_status(struct seq_file *m, void *unused)
1685 {
1686 	struct drm_info_node *node = m->private;
1687 	struct drm_device *dev = node->minor->dev;
1688 	struct drm_i915_private *dev_priv = dev->dev_private;
1689 
1690 	if (!HAS_FBC(dev)) {
1691 		seq_puts(m, "FBC unsupported on this chipset\n");
1692 		return 0;
1693 	}
1694 
1695 	intel_runtime_pm_get(dev_priv);
1696 	mutex_lock(&dev_priv->fbc.lock);
1697 
1698 	if (intel_fbc_is_active(dev_priv))
1699 		seq_puts(m, "FBC enabled\n");
1700 	else
1701 		seq_printf(m, "FBC disabled: %s\n",
1702 			   dev_priv->fbc.no_fbc_reason);
1703 
1704 	if (INTEL_INFO(dev_priv)->gen >= 7)
1705 		seq_printf(m, "Compressing: %s\n",
1706 			   yesno(I915_READ(FBC_STATUS2) &
1707 				 FBC_COMPRESSION_MASK));
1708 
1709 	mutex_unlock(&dev_priv->fbc.lock);
1710 	intel_runtime_pm_put(dev_priv);
1711 
1712 	return 0;
1713 }
1714 
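/*
 * i915_fbc_fc_get/_set expose the FBC "false color" debug bit
 * (FBC_CTL_FALSE_COLOR in ILK_DPFC_CONTROL, gen7+ with FBC only), used
 * during testing to make compressed regions visually distinct.
 */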
1715 static int i915_fbc_fc_get(void *data, u64 *val)
1716 {
1717 	struct drm_device *dev = data;
1718 	struct drm_i915_private *dev_priv = dev->dev_private;
1719 
1720 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1721 		return -ENODEV;
1722 
1723 	*val = dev_priv->fbc.false_color;
1724 
1725 	return 0;
1726 }
1727 
1728 static int i915_fbc_fc_set(void *data, u64 val)
1729 {
1730 	struct drm_device *dev = data;
1731 	struct drm_i915_private *dev_priv = dev->dev_private;
1732 	u32 reg;
1733 
1734 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1735 		return -ENODEV;
1736 
1737 	mutex_lock(&dev_priv->fbc.lock);
1738 
1739 	reg = I915_READ(ILK_DPFC_CONTROL);
1740 	dev_priv->fbc.false_color = val;
1741 
1742 	I915_WRITE(ILK_DPFC_CONTROL, val ?
1743 		   (reg | FBC_CTL_FALSE_COLOR) :
1744 		   (reg & ~FBC_CTL_FALSE_COLOR));
1745 
1746 	mutex_unlock(&dev_priv->fbc.lock);
1747 	return 0;
1748 }
1749 
1750 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1751 			i915_fbc_fc_get, i915_fbc_fc_set,
1752 			"%llu\n");
1753 
1754 static int i915_ips_status(struct seq_file *m, void *unused)
1755 {
1756 	struct drm_info_node *node = m->private;
1757 	struct drm_device *dev = node->minor->dev;
1758 	struct drm_i915_private *dev_priv = dev->dev_private;
1759 
1760 	if (!HAS_IPS(dev)) {
1761 		seq_puts(m, "not supported\n");
1762 		return 0;
1763 	}
1764 
1765 	intel_runtime_pm_get(dev_priv);
1766 
1767 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1768 		   yesno(i915.enable_ips));
1769 
1770 	if (INTEL_INFO(dev)->gen >= 8) {
1771 		seq_puts(m, "Currently: unknown\n");
1772 	} else {
1773 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1774 			seq_puts(m, "Currently: enabled\n");
1775 		else
1776 			seq_puts(m, "Currently: disabled\n");
1777 	}
1778 
1779 	intel_runtime_pm_put(dev_priv);
1780 
1781 	return 0;
1782 }
1783 
1784 static int i915_sr_status(struct seq_file *m, void *unused)
1785 {
1786 	struct drm_info_node *node = m->private;
1787 	struct drm_device *dev = node->minor->dev;
1788 	struct drm_i915_private *dev_priv = dev->dev_private;
1789 	bool sr_enabled = false;
1790 
1791 	intel_runtime_pm_get(dev_priv);
1792 
1793 	if (HAS_PCH_SPLIT(dev))
1794 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1795 	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
1796 		 IS_I945G(dev) || IS_I945GM(dev))
1797 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1798 	else if (IS_I915GM(dev))
1799 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1800 	else if (IS_PINEVIEW(dev))
1801 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1802 	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
1803 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1804 
1805 	intel_runtime_pm_put(dev_priv);
1806 
1807 	seq_printf(m, "self-refresh: %s\n",
1808 		   sr_enabled ? "enabled" : "disabled");
1809 
1810 	return 0;
1811 }
1812 
1813 static int i915_emon_status(struct seq_file *m, void *unused)
1814 {
1815 	struct drm_info_node *node = m->private;
1816 	struct drm_device *dev = node->minor->dev;
1817 	struct drm_i915_private *dev_priv = dev->dev_private;
1818 	unsigned long temp, chipset, gfx;
1819 	int ret;
1820 
1821 	if (!IS_GEN5(dev))
1822 		return -ENODEV;
1823 
1824 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1825 	if (ret)
1826 		return ret;
1827 
1828 	temp = i915_mch_val(dev_priv);
1829 	chipset = i915_chipset_val(dev_priv);
1830 	gfx = i915_gfx_val(dev_priv);
1831 	mutex_unlock(&dev->struct_mutex);
1832 
1833 	seq_printf(m, "GMCH temp: %ld\n", temp);
1834 	seq_printf(m, "Chipset power: %ld\n", chipset);
1835 	seq_printf(m, "GFX power: %ld\n", gfx);
1836 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1837 
1838 	return 0;
1839 }
1840 
1841 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1842 {
1843 	struct drm_info_node *node = m->private;
1844 	struct drm_device *dev = node->minor->dev;
1845 	struct drm_i915_private *dev_priv = dev->dev_private;
1846 	int ret = 0;
1847 	int gpu_freq, ia_freq;
1848 	unsigned int max_gpu_freq, min_gpu_freq;
1849 
1850 	if (!HAS_CORE_RING_FREQ(dev)) {
1851 		seq_puts(m, "unsupported on this chipset\n");
1852 		return 0;
1853 	}
1854 
1855 	intel_runtime_pm_get(dev_priv);
1856 
1857 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1858 
1859 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1860 	if (ret)
1861 		goto out;
1862 
1863 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1864 		/* Convert GT frequency to 50 MHz units */
1865 		min_gpu_freq =
1866 			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
1867 		max_gpu_freq =
1868 			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
1869 	} else {
1870 		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
1871 		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
1872 	}
1873 
1874 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1875 
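	/*
	 * For each GPU frequency, GEN6_PCODE_READ_MIN_FREQ_TABLE returns the
	 * effective CPU and ring frequencies in the two low bytes of ia_freq,
	 * in units of 100 MHz.
	 */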
1876 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1877 		ia_freq = gpu_freq;
1878 		sandybridge_pcode_read(dev_priv,
1879 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1880 				       &ia_freq);
1881 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1882 			   intel_gpu_freq(dev_priv, (gpu_freq *
1883 				(IS_SKYLAKE(dev) || IS_KABYLAKE(dev) ?
1884 				 GEN9_FREQ_SCALER : 1))),
1885 			   ((ia_freq >> 0) & 0xff) * 100,
1886 			   ((ia_freq >> 8) & 0xff) * 100);
1887 	}
1888 
1889 	mutex_unlock(&dev_priv->rps.hw_lock);
1890 
1891 out:
1892 	intel_runtime_pm_put(dev_priv);
1893 	return ret;
1894 }
1895 
1896 static int i915_opregion(struct seq_file *m, void *unused)
1897 {
1898 	struct drm_info_node *node = m->private;
1899 	struct drm_device *dev = node->minor->dev;
1900 	struct drm_i915_private *dev_priv = dev->dev_private;
1901 	struct intel_opregion *opregion = &dev_priv->opregion;
1902 	int ret;
1903 
1904 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1905 	if (ret)
1906 		goto out;
1907 
1908 	if (opregion->header)
1909 		seq_write(m, opregion->header, OPREGION_SIZE);
1910 
1911 	mutex_unlock(&dev->struct_mutex);
1912 
1913 out:
1914 	return ret;
1915 }
1916 
1917 static int i915_vbt(struct seq_file *m, void *unused)
1918 {
1919 	struct drm_info_node *node = m->private;
1920 	struct drm_device *dev = node->minor->dev;
1921 	struct drm_i915_private *dev_priv = dev->dev_private;
1922 	struct intel_opregion *opregion = &dev_priv->opregion;
1923 
1924 	if (opregion->vbt)
1925 		seq_write(m, opregion->vbt, opregion->vbt_size);
1926 
1927 	return 0;
1928 }
1929 
1930 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1931 {
1932 	struct drm_info_node *node = m->private;
1933 	struct drm_device *dev = node->minor->dev;
1934 	struct intel_framebuffer *fbdev_fb = NULL;
1935 	struct drm_framebuffer *drm_fb;
1936 	int ret;
1937 
1938 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1939 	if (ret)
1940 		return ret;
1941 
1942 #ifdef CONFIG_DRM_FBDEV_EMULATION
1943 	if (to_i915(dev)->fbdev) {
1944 		fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
1945 
1946 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1947 			   fbdev_fb->base.width,
1948 			   fbdev_fb->base.height,
1949 			   fbdev_fb->base.depth,
1950 			   fbdev_fb->base.bits_per_pixel,
1951 			   fbdev_fb->base.modifier[0],
1952 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1953 		describe_obj(m, fbdev_fb->obj);
1954 		seq_putc(m, '\n');
1955 	}
1956 #endif
1957 
1958 	mutex_lock(&dev->mode_config.fb_lock);
1959 	drm_for_each_fb(drm_fb, dev) {
1960 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1961 		if (fb == fbdev_fb)
1962 			continue;
1963 
1964 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1965 			   fb->base.width,
1966 			   fb->base.height,
1967 			   fb->base.depth,
1968 			   fb->base.bits_per_pixel,
1969 			   fb->base.modifier[0],
1970 			   drm_framebuffer_read_refcount(&fb->base));
1971 		describe_obj(m, fb->obj);
1972 		seq_putc(m, '\n');
1973 	}
1974 	mutex_unlock(&dev->mode_config.fb_lock);
1975 	mutex_unlock(&dev->struct_mutex);
1976 
1977 	return 0;
1978 }
1979 
1980 static void describe_ctx_ringbuf(struct seq_file *m,
1981 				 struct intel_ringbuffer *ringbuf)
1982 {
1983 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
1984 		   ringbuf->space, ringbuf->head, ringbuf->tail,
1985 		   ringbuf->last_retired_head);
1986 }
1987 
1988 static int i915_context_status(struct seq_file *m, void *unused)
1989 {
1990 	struct drm_info_node *node = m->private;
1991 	struct drm_device *dev = node->minor->dev;
1992 	struct drm_i915_private *dev_priv = dev->dev_private;
1993 	struct intel_engine_cs *engine;
1994 	struct intel_context *ctx;
1995 	enum intel_engine_id id;
1996 	int ret;
1997 
1998 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1999 	if (ret)
2000 		return ret;
2001 
2002 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
2003 		if (!i915.enable_execlists &&
2004 		    ctx->legacy_hw_ctx.rcs_state == NULL)
2005 			continue;
2006 
2007 		seq_puts(m, "HW context ");
2008 		describe_ctx(m, ctx);
2009 		if (ctx == dev_priv->kernel_context)
2010 			seq_puts(m, "(kernel context) ");
2011 
2012 		if (i915.enable_execlists) {
2013 			seq_putc(m, '\n');
2014 			for_each_engine_id(engine, dev_priv, id) {
2015 				struct drm_i915_gem_object *ctx_obj =
2016 					ctx->engine[id].state;
2017 				struct intel_ringbuffer *ringbuf =
2018 					ctx->engine[id].ringbuf;
2019 
2020 				seq_printf(m, "%s: ", engine->name);
2021 				if (ctx_obj)
2022 					describe_obj(m, ctx_obj);
2023 				if (ringbuf)
2024 					describe_ctx_ringbuf(m, ringbuf);
2025 				seq_putc(m, '\n');
2026 			}
2027 		} else {
2028 			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
2029 		}
2030 
2031 		seq_putc(m, '\n');
2032 	}
2033 
2034 	mutex_unlock(&dev->struct_mutex);
2035 
2036 	return 0;
2037 }
2038 
2039 static void i915_dump_lrc_obj(struct seq_file *m,
2040 			      struct intel_context *ctx,
2041 			      struct intel_engine_cs *engine)
2042 {
2043 	struct page *page;
2044 	uint32_t *reg_state;
2045 	int j;
2046 	struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
2047 	unsigned long ggtt_offset = 0;
2048 
2049 	if (ctx_obj == NULL) {
2050 		seq_printf(m, "Context on %s with no gem object\n",
2051 			   engine->name);
2052 		return;
2053 	}
2054 
2055 	seq_printf(m, "CONTEXT: %s %u\n", engine->name,
2056 		   intel_execlists_ctx_id(ctx, engine));
2057 
2058 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
2059 		seq_puts(m, "\tNot bound in GGTT\n");
2060 	else
2061 		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
2062 
2063 	if (i915_gem_object_get_pages(ctx_obj)) {
2064 		seq_puts(m, "\tFailed to get pages for context object\n");
2065 		return;
2066 	}
2067 
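	/*
	 * Dump the start of the context register state, read from the
	 * LRC_STATE_PN page of the context object (the hard-coded 4096 below
	 * is that page's offset within the object).
	 */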
2068 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
2069 	if (!WARN_ON(page == NULL)) {
2070 		reg_state = kmap_atomic(page);
2071 
2072 		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
2073 			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
2074 				   ggtt_offset + 4096 + (j * 4),
2075 				   reg_state[j], reg_state[j + 1],
2076 				   reg_state[j + 2], reg_state[j + 3]);
2077 		}
2078 		kunmap_atomic(reg_state);
2079 	}
2080 
2081 	seq_putc(m, '\n');
2082 }
2083 
2084 static int i915_dump_lrc(struct seq_file *m, void *unused)
2085 {
2086 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2087 	struct drm_device *dev = node->minor->dev;
2088 	struct drm_i915_private *dev_priv = dev->dev_private;
2089 	struct intel_engine_cs *engine;
2090 	struct intel_context *ctx;
2091 	int ret;
2092 
2093 	if (!i915.enable_execlists) {
2094 		seq_puts(m, "Logical Ring Contexts are disabled\n");
2095 		return 0;
2096 	}
2097 
2098 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2099 	if (ret)
2100 		return ret;
2101 
2102 	list_for_each_entry(ctx, &dev_priv->context_list, link)
2103 		if (ctx != dev_priv->kernel_context) {
2104 			for_each_engine(engine, dev_priv)
2105 				i915_dump_lrc_obj(m, ctx, engine);
2106 		}
2107 
2108 	mutex_unlock(&dev->struct_mutex);
2109 
2110 	return 0;
2111 }
2112 
2113 static int i915_execlists(struct seq_file *m, void *data)
2114 {
2115 	struct drm_info_node *node = (struct drm_info_node *)m->private;
2116 	struct drm_device *dev = node->minor->dev;
2117 	struct drm_i915_private *dev_priv = dev->dev_private;
2118 	struct intel_engine_cs *engine;
2119 	u32 status_pointer;
2120 	u8 read_pointer;
2121 	u8 write_pointer;
2122 	u32 status;
2123 	u32 ctx_id;
2124 	struct list_head *cursor;
2125 	int i, ret;
2126 
2127 	if (!i915.enable_execlists) {
2128 		seq_puts(m, "Logical Ring Contexts are disabled\n");
2129 		return 0;
2130 	}
2131 
2132 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2133 	if (ret)
2134 		return ret;
2135 
2136 	intel_runtime_pm_get(dev_priv);
2137 
2138 	for_each_engine(engine, dev_priv) {
2139 		struct drm_i915_gem_request *head_req = NULL;
2140 		int count = 0;
2141 
2142 		seq_printf(m, "%s\n", engine->name);
2143 
2144 		status = I915_READ(RING_EXECLIST_STATUS_LO(engine));
2145 		ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine));
2146 		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
2147 			   status, ctx_id);
2148 
2149 		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
2150 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
2151 
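		/*
		 * The context status buffer is a ring of GEN8_CSB_ENTRIES
		 * slots; unwrap the write pointer so the printed distance
		 * from the read pointer stays meaningful across wrap-around.
		 */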
2152 		read_pointer = engine->next_context_status_buffer;
2153 		write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
2154 		if (read_pointer > write_pointer)
2155 			write_pointer += GEN8_CSB_ENTRIES;
2156 		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
2157 			   read_pointer, write_pointer);
2158 
2159 		for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
2160 			status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i));
2161 			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i));
2162 
2163 			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
2164 				   i, status, ctx_id);
2165 		}
2166 
2167 		spin_lock_bh(&engine->execlist_lock);
2168 		list_for_each(cursor, &engine->execlist_queue)
2169 			count++;
2170 		head_req = list_first_entry_or_null(&engine->execlist_queue,
2171 						    struct drm_i915_gem_request,
2172 						    execlist_link);
2173 		spin_unlock_bh(&engine->execlist_lock);
2174 
2175 		seq_printf(m, "\t%d requests in queue\n", count);
2176 		if (head_req) {
2177 			seq_printf(m, "\tHead request id: %u\n",
2178 				   intel_execlists_ctx_id(head_req->ctx, engine));
2179 			seq_printf(m, "\tHead request tail: %u\n",
2180 				   head_req->tail);
2181 		}
2182 
2183 		seq_putc(m, '\n');
2184 	}
2185 
2186 	intel_runtime_pm_put(dev_priv);
2187 	mutex_unlock(&dev->struct_mutex);
2188 
2189 	return 0;
2190 }
2191 
2192 static const char *swizzle_string(unsigned swizzle)
2193 {
2194 	switch (swizzle) {
2195 	case I915_BIT_6_SWIZZLE_NONE:
2196 		return "none";
2197 	case I915_BIT_6_SWIZZLE_9:
2198 		return "bit9";
2199 	case I915_BIT_6_SWIZZLE_9_10:
2200 		return "bit9/bit10";
2201 	case I915_BIT_6_SWIZZLE_9_11:
2202 		return "bit9/bit11";
2203 	case I915_BIT_6_SWIZZLE_9_10_11:
2204 		return "bit9/bit10/bit11";
2205 	case I915_BIT_6_SWIZZLE_9_17:
2206 		return "bit9/bit17";
2207 	case I915_BIT_6_SWIZZLE_9_10_17:
2208 		return "bit9/bit10/bit17";
2209 	case I915_BIT_6_SWIZZLE_UNKNOWN:
2210 		return "unknown";
2211 	}
2212 
2213 	return "bug";
2214 }
2215 
2216 static int i915_swizzle_info(struct seq_file *m, void *data)
2217 {
2218 	struct drm_info_node *node = m->private;
2219 	struct drm_device *dev = node->minor->dev;
2220 	struct drm_i915_private *dev_priv = dev->dev_private;
2221 	int ret;
2222 
2223 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2224 	if (ret)
2225 		return ret;
2226 	intel_runtime_pm_get(dev_priv);
2227 
2228 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2229 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2230 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2231 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2232 
2233 	if (IS_GEN3(dev) || IS_GEN4(dev)) {
2234 		seq_printf(m, "DCC = 0x%08x\n",
2235 			   I915_READ(DCC));
2236 		seq_printf(m, "DCC2 = 0x%08x\n",
2237 			   I915_READ(DCC2));
2238 		seq_printf(m, "C0DRB3 = 0x%04x\n",
2239 			   I915_READ16(C0DRB3));
2240 		seq_printf(m, "C1DRB3 = 0x%04x\n",
2241 			   I915_READ16(C1DRB3));
2242 	} else if (INTEL_INFO(dev)->gen >= 6) {
2243 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2244 			   I915_READ(MAD_DIMM_C0));
2245 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2246 			   I915_READ(MAD_DIMM_C1));
2247 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2248 			   I915_READ(MAD_DIMM_C2));
2249 		seq_printf(m, "TILECTL = 0x%08x\n",
2250 			   I915_READ(TILECTL));
2251 		if (INTEL_INFO(dev)->gen >= 8)
2252 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2253 				   I915_READ(GAMTARBMODE));
2254 		else
2255 			seq_printf(m, "ARB_MODE = 0x%08x\n",
2256 				   I915_READ(ARB_MODE));
2257 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2258 			   I915_READ(DISP_ARB_CTL));
2259 	}
2260 
2261 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2262 		seq_puts(m, "L-shaped memory detected\n");
2263 
2264 	intel_runtime_pm_put(dev_priv);
2265 	mutex_unlock(&dev->struct_mutex);
2266 
2267 	return 0;
2268 }
2269 
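/* idr callback: dump the PPGTT belonging to one context of a single client. */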
2270 static int per_file_ctx(int id, void *ptr, void *data)
2271 {
2272 	struct intel_context *ctx = ptr;
2273 	struct seq_file *m = data;
2274 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2275 
2276 	if (!ppgtt) {
2277 		seq_printf(m, "  no ppgtt for context %d\n",
2278 			   ctx->user_handle);
2279 		return 0;
2280 	}
2281 
2282 	if (i915_gem_context_is_default(ctx))
2283 		seq_puts(m, "  default context:\n");
2284 	else
2285 		seq_printf(m, "  context %d:\n", ctx->user_handle);
2286 	ppgtt->debug_dump(ppgtt, m);
2287 
2288 	return 0;
2289 }
2290 
2291 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2292 {
2293 	struct drm_i915_private *dev_priv = dev->dev_private;
2294 	struct intel_engine_cs *engine;
2295 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2296 	int i;
2297 
2298 	if (!ppgtt)
2299 		return;
2300 
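	/* Each PDP entry is split across hi/lo MMIO registers; reassemble 64 bits. */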
2301 	for_each_engine(engine, dev_priv) {
2302 		seq_printf(m, "%s\n", engine->name);
2303 		for (i = 0; i < 4; i++) {
2304 			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2305 			pdp <<= 32;
2306 			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2307 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2308 		}
2309 	}
2310 }
2311 
2312 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2313 {
2314 	struct drm_i915_private *dev_priv = dev->dev_private;
2315 	struct intel_engine_cs *engine;
2316 
2317 	if (INTEL_INFO(dev)->gen == 6)
2318 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2319 
2320 	for_each_engine(engine, dev_priv) {
2321 		seq_printf(m, "%s\n", engine->name);
2322 		if (INTEL_INFO(dev)->gen == 7)
2323 			seq_printf(m, "GFX_MODE: 0x%08x\n",
2324 				   I915_READ(RING_MODE_GEN7(engine)));
2325 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2326 			   I915_READ(RING_PP_DIR_BASE(engine)));
2327 		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2328 			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
2329 		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2330 			   I915_READ(RING_PP_DIR_DCLV(engine)));
2331 	}
2332 	if (dev_priv->mm.aliasing_ppgtt) {
2333 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2334 
2335 		seq_puts(m, "aliasing PPGTT:\n");
2336 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2337 
2338 		ppgtt->debug_dump(ppgtt, m);
2339 	}
2340 
2341 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2342 }
2343 
2344 static int i915_ppgtt_info(struct seq_file *m, void *data)
2345 {
2346 	struct drm_info_node *node = m->private;
2347 	struct drm_device *dev = node->minor->dev;
2348 	struct drm_i915_private *dev_priv = dev->dev_private;
2349 	struct drm_file *file;
2350 
2351 	int ret = mutex_lock_interruptible(&dev->struct_mutex);
2352 	if (ret)
2353 		return ret;
2354 	intel_runtime_pm_get(dev_priv);
2355 
2356 	if (INTEL_INFO(dev)->gen >= 8)
2357 		gen8_ppgtt_info(m, dev);
2358 	else if (INTEL_INFO(dev)->gen >= 6)
2359 		gen6_ppgtt_info(m, dev);
2360 
2361 	mutex_lock(&dev->filelist_mutex);
2362 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2363 		struct drm_i915_file_private *file_priv = file->driver_priv;
2364 		struct task_struct *task;
2365 
2366 		task = get_pid_task(file->pid, PIDTYPE_PID);
2367 		if (!task) {
2368 			ret = -ESRCH;
2369 			goto out_unlock;
2370 		}
2371 		seq_printf(m, "\nproc: %s\n", task->comm);
2372 		put_task_struct(task);
2373 		idr_for_each(&file_priv->context_idr, per_file_ctx,
2374 			     (void *)(unsigned long)m);
2375 	}
2376 out_unlock:
2377 	mutex_unlock(&dev->filelist_mutex);
2378 
2379 	intel_runtime_pm_put(dev_priv);
2380 	mutex_unlock(&dev->struct_mutex);
2381 
2382 	return ret;
2383 }
2384 
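/* Sum engine->irq_refcount, i.e. how many waiters currently rely on GPU IRQs. */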
2385 static int count_irq_waiters(struct drm_i915_private *i915)
2386 {
2387 	struct intel_engine_cs *engine;
2388 	int count = 0;
2389 
2390 	for_each_engine(engine, i915)
2391 		count += engine->irq_refcount;
2392 
2393 	return count;
2394 }
2395 
2396 static int i915_rps_boost_info(struct seq_file *m, void *data)
2397 {
2398 	struct drm_info_node *node = m->private;
2399 	struct drm_device *dev = node->minor->dev;
2400 	struct drm_i915_private *dev_priv = dev->dev_private;
2401 	struct drm_file *file;
2402 
2403 	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
2404 	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
2405 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2406 	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2407 		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
2408 		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
2409 		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
2410 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
2411 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
2412 
2413 	mutex_lock(&dev->filelist_mutex);
2414 	spin_lock(&dev_priv->rps.client_lock);
2415 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2416 		struct drm_i915_file_private *file_priv = file->driver_priv;
2417 		struct task_struct *task;
2418 
2419 		rcu_read_lock();
2420 		task = pid_task(file->pid, PIDTYPE_PID);
2421 		seq_printf(m, "%s [%d]: %d boosts%s\n",
2422 			   task ? task->comm : "<unknown>",
2423 			   task ? task->pid : -1,
2424 			   file_priv->rps.boosts,
2425 			   list_empty(&file_priv->rps.link) ? "" : ", active");
2426 		rcu_read_unlock();
2427 	}
2428 	seq_printf(m, "Semaphore boosts: %d%s\n",
2429 		   dev_priv->rps.semaphores.boosts,
2430 		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
2431 	seq_printf(m, "MMIO flip boosts: %d%s\n",
2432 		   dev_priv->rps.mmioflips.boosts,
2433 		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
2434 	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
2435 	spin_unlock(&dev_priv->rps.client_lock);
2436 	mutex_unlock(&dev->filelist_mutex);
2437 
2438 	return 0;
2439 }
2440 
2441 static int i915_llc(struct seq_file *m, void *data)
2442 {
2443 	struct drm_info_node *node = m->private;
2444 	struct drm_device *dev = node->minor->dev;
2445 	struct drm_i915_private *dev_priv = dev->dev_private;
2446 	const bool edram = INTEL_GEN(dev_priv) > 8;
2447 
2448 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
2449 	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2450 		   intel_uncore_edram_size(dev_priv)/1024/1024);
2451 
2452 	return 0;
2453 }
2454 
2455 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2456 {
2457 	struct drm_info_node *node = m->private;
2458 	struct drm_i915_private *dev_priv = node->minor->dev->dev_private;
2459 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
2460 	u32 tmp, i;
2461 
2462 	if (!HAS_GUC_UCODE(dev_priv))
2463 		return 0;
2464 
2465 	seq_printf(m, "GuC firmware status:\n");
2466 	seq_printf(m, "\tpath: %s\n",
2467 		guc_fw->guc_fw_path);
2468 	seq_printf(m, "\tfetch: %s\n",
2469 		intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
2470 	seq_printf(m, "\tload: %s\n",
2471 		intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
2472 	seq_printf(m, "\tversion wanted: %d.%d\n",
2473 		guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
2474 	seq_printf(m, "\tversion found: %d.%d\n",
2475 		guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
2476 	seq_printf(m, "\theader: offset is %d; size = %d\n",
2477 		guc_fw->header_offset, guc_fw->header_size);
2478 	seq_printf(m, "\tuCode: offset is %d; size = %d\n",
2479 		guc_fw->ucode_offset, guc_fw->ucode_size);
2480 	seq_printf(m, "\tRSA: offset is %d; size = %d\n",
2481 		guc_fw->rsa_offset, guc_fw->rsa_size);
2482 
2483 	tmp = I915_READ(GUC_STATUS);
2484 
2485 	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2486 	seq_printf(m, "\tBootrom status = 0x%x\n",
2487 		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2488 	seq_printf(m, "\tuKernel status = 0x%x\n",
2489 		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2490 	seq_printf(m, "\tMIA Core status = 0x%x\n",
2491 		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2492 	seq_puts(m, "\nScratch registers:\n");
2493 	for (i = 0; i < 16; i++)
2494 		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2495 
2496 	return 0;
2497 }
2498 
2499 static void i915_guc_client_info(struct seq_file *m,
2500 				 struct drm_i915_private *dev_priv,
2501 				 struct i915_guc_client *client)
2502 {
2503 	struct intel_engine_cs *engine;
2504 	uint64_t tot = 0;
2505 
2506 	seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
2507 		client->priority, client->ctx_index, client->proc_desc_offset);
2508 	seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
2509 		client->doorbell_id, client->doorbell_offset, client->cookie);
2510 	seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2511 		client->wq_size, client->wq_offset, client->wq_tail);
2512 
2513 	seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
2514 	seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
2515 	seq_printf(m, "\tLast submission result: %d\n", client->retcode);
2516 
2517 	for_each_engine(engine, dev_priv) {
2518 		seq_printf(m, "\tSubmissions: %llu %s\n",
2519 				client->submissions[engine->guc_id],
2520 				engine->name);
2521 		tot += client->submissions[engine->guc_id];
2522 	}
2523 	seq_printf(m, "\tTotal: %llu\n", tot);
2524 }
2525 
2526 static int i915_guc_info(struct seq_file *m, void *data)
2527 {
2528 	struct drm_info_node *node = m->private;
2529 	struct drm_device *dev = node->minor->dev;
2530 	struct drm_i915_private *dev_priv = dev->dev_private;
2531 	struct intel_guc guc;
2532 	struct i915_guc_client client = {};
2533 	struct intel_engine_cs *engine;
2534 	u64 total = 0;
2535 
2536 	if (!HAS_GUC_SCHED(dev_priv))
2537 		return 0;
2538 
2539 	if (mutex_lock_interruptible(&dev->struct_mutex))
2540 		return 0;
2541 
2542 	/* Take a local copy of the GuC data, so we can dump it at leisure */
2543 	guc = dev_priv->guc;
2544 	if (guc.execbuf_client)
2545 		client = *guc.execbuf_client;
2546 
2547 	mutex_unlock(&dev->struct_mutex);
2548 
2549 	seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
2550 	seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
2551 	seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
2552 	seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
2553 	seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
2554 
2555 	seq_printf(m, "\nGuC submissions:\n");
2556 	for_each_engine(engine, dev_priv) {
2557 		seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
2558 			engine->name, guc.submissions[engine->guc_id],
2559 			guc.last_seqno[engine->guc_id]);
2560 		total += guc.submissions[engine->guc_id];
2561 	}
2562 	seq_printf(m, "\t%s: %llu\n", "Total", total);
2563 
2564 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
2565 	i915_guc_client_info(m, dev_priv, &client);
2566 
2567 	/* Add more as required ... */
2568 
2569 	return 0;
2570 }
2571 
2572 static int i915_guc_log_dump(struct seq_file *m, void *data)
2573 {
2574 	struct drm_info_node *node = m->private;
2575 	struct drm_device *dev = node->minor->dev;
2576 	struct drm_i915_private *dev_priv = dev->dev_private;
2577 	struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
2578 	u32 *log;
2579 	int i = 0, pg;
2580 
2581 	if (!log_obj)
2582 		return 0;
2583 
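	/* Dump the GuC log object page by page, four dwords per output line. */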
2584 	for (pg = 0; pg < log_obj->base.size / PAGE_SIZE; pg++) {
2585 		log = kmap_atomic(i915_gem_object_get_page(log_obj, pg));
2586 
2587 		for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
2588 			seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2589 				   *(log + i), *(log + i + 1),
2590 				   *(log + i + 2), *(log + i + 3));
2591 
2592 		kunmap_atomic(log);
2593 	}
2594 
2595 	seq_putc(m, '\n');
2596 
2597 	return 0;
2598 }
2599 
2600 static int i915_edp_psr_status(struct seq_file *m, void *data)
2601 {
2602 	struct drm_info_node *node = m->private;
2603 	struct drm_device *dev = node->minor->dev;
2604 	struct drm_i915_private *dev_priv = dev->dev_private;
2605 	u32 psrperf = 0;
2606 	u32 stat[3];
2607 	enum pipe pipe;
2608 	bool enabled = false;
2609 
2610 	if (!HAS_PSR(dev)) {
2611 		seq_puts(m, "PSR not supported\n");
2612 		return 0;
2613 	}
2614 
2615 	intel_runtime_pm_get(dev_priv);
2616 
2617 	mutex_lock(&dev_priv->psr.lock);
2618 	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2619 	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2620 	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2621 	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2622 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2623 		   dev_priv->psr.busy_frontbuffer_bits);
2624 	seq_printf(m, "Re-enable work scheduled: %s\n",
2625 		   yesno(work_busy(&dev_priv->psr.work.work)));
2626 
2627 	if (HAS_DDI(dev))
2628 		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2629 	else {
2630 		for_each_pipe(dev_priv, pipe) {
2631 			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2632 				VLV_EDP_PSR_CURR_STATE_MASK;
2633 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2634 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2635 				enabled = true;
2636 		}
2637 	}
2638 
2639 	seq_printf(m, "Main link in standby mode: %s\n",
2640 		   yesno(dev_priv->psr.link_standby));
2641 
2642 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2643 
2644 	if (!HAS_DDI(dev))
2645 		for_each_pipe(dev_priv, pipe) {
2646 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2647 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2648 				seq_printf(m, " pipe %c", pipe_name(pipe));
2649 		}
2650 	seq_puts(m, "\n");
2651 
2652 	/*
2653 	 * VLV/CHV PSR has no performance counter of any kind.
2654 	 * On SKL+ the perf counter is reset to 0 every time a DC state is entered.
2655 	 */
2656 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2657 		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2658 			EDP_PSR_PERF_CNT_MASK;
2659 
2660 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2661 	}
2662 	mutex_unlock(&dev_priv->psr.lock);
2663 
2664 	intel_runtime_pm_put(dev_priv);
2665 	return 0;
2666 }
2667 
2668 static int i915_sink_crc(struct seq_file *m, void *data)
2669 {
2670 	struct drm_info_node *node = m->private;
2671 	struct drm_device *dev = node->minor->dev;
2672 	struct intel_encoder *encoder;
2673 	struct intel_connector *connector;
2674 	struct intel_dp *intel_dp = NULL;
2675 	int ret;
2676 	u8 crc[6];
2677 
2678 	drm_modeset_lock_all(dev);
2679 	for_each_intel_connector(dev, connector) {
2680 
2681 		if (connector->base.dpms != DRM_MODE_DPMS_ON)
2682 			continue;
2683 
2684 		if (!connector->base.encoder)
2685 			continue;
2686 
2687 		encoder = to_intel_encoder(connector->base.encoder);
2688 		if (encoder->type != INTEL_OUTPUT_EDP)
2689 			continue;
2690 
2691 		intel_dp = enc_to_intel_dp(&encoder->base);
2692 
2693 		ret = intel_dp_sink_crc(intel_dp, crc);
2694 		if (ret)
2695 			goto out;
2696 
2697 		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2698 			   crc[0], crc[1], crc[2],
2699 			   crc[3], crc[4], crc[5]);
2700 		goto out;
2701 	}
2702 	ret = -ENODEV;
2703 out:
2704 	drm_modeset_unlock_all(dev);
2705 	return ret;
2706 }
2707 
2708 static int i915_energy_uJ(struct seq_file *m, void *data)
2709 {
2710 	struct drm_info_node *node = m->private;
2711 	struct drm_device *dev = node->minor->dev;
2712 	struct drm_i915_private *dev_priv = dev->dev_private;
2713 	u64 power;
2714 	u32 units;
2715 
2716 	if (INTEL_INFO(dev)->gen < 6)
2717 		return -ENODEV;
2718 
2719 	intel_runtime_pm_get(dev_priv);
2720 
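	/*
	 * Bits 12:8 of MSR_RAPL_POWER_UNIT give the energy status unit as
	 * 1/2^ESU Joules; convert that to microjoules per counter tick and
	 * scale the MCH energy status counter by it.
	 */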
2721 	rdmsrl(MSR_RAPL_POWER_UNIT, power);
2722 	power = (power & 0x1f00) >> 8;
2723 	units = 1000000 / (1 << power); /* convert to uJ */
2724 	power = I915_READ(MCH_SECP_NRG_STTS);
2725 	power *= units;
2726 
2727 	intel_runtime_pm_put(dev_priv);
2728 
2729 	seq_printf(m, "%llu", (unsigned long long)power);
2730 
2731 	return 0;
2732 }
2733 
2734 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2735 {
2736 	struct drm_info_node *node = m->private;
2737 	struct drm_device *dev = node->minor->dev;
2738 	struct drm_i915_private *dev_priv = dev->dev_private;
2739 
2740 	if (!HAS_RUNTIME_PM(dev_priv))
2741 		seq_puts(m, "Runtime power management not supported\n");
2742 
2743 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
2744 	seq_printf(m, "IRQs disabled: %s\n",
2745 		   yesno(!intel_irqs_enabled(dev_priv)));
2746 #ifdef CONFIG_PM
2747 	seq_printf(m, "Usage count: %d\n",
2748 		   atomic_read(&dev->dev->power.usage_count));
2749 #else
2750 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2751 #endif
2752 	seq_printf(m, "PCI device power state: %s [%d]\n",
2753 		   pci_power_name(dev_priv->dev->pdev->current_state),
2754 		   dev_priv->dev->pdev->current_state);
2755 
2756 	return 0;
2757 }
2758 
2759 static int i915_power_domain_info(struct seq_file *m, void *unused)
2760 {
2761 	struct drm_info_node *node = m->private;
2762 	struct drm_device *dev = node->minor->dev;
2763 	struct drm_i915_private *dev_priv = dev->dev_private;
2764 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2765 	int i;
2766 
2767 	mutex_lock(&power_domains->lock);
2768 
2769 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2770 	for (i = 0; i < power_domains->power_well_count; i++) {
2771 		struct i915_power_well *power_well;
2772 		enum intel_display_power_domain power_domain;
2773 
2774 		power_well = &power_domains->power_wells[i];
2775 		seq_printf(m, "%-25s %d\n", power_well->name,
2776 			   power_well->count);
2777 
2778 		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2779 		     power_domain++) {
2780 			if (!(BIT(power_domain) & power_well->domains))
2781 				continue;
2782 
2783 			seq_printf(m, "  %-23s %d\n",
2784 				 intel_display_power_domain_str(power_domain),
2785 				 power_domains->domain_use_count[power_domain]);
2786 		}
2787 	}
2788 
2789 	mutex_unlock(&power_domains->lock);
2790 
2791 	return 0;
2792 }
2793 
2794 static int i915_dmc_info(struct seq_file *m, void *unused)
2795 {
2796 	struct drm_info_node *node = m->private;
2797 	struct drm_device *dev = node->minor->dev;
2798 	struct drm_i915_private *dev_priv = dev->dev_private;
2799 	struct intel_csr *csr;
2800 
2801 	if (!HAS_CSR(dev)) {
2802 		seq_puts(m, "not supported\n");
2803 		return 0;
2804 	}
2805 
2806 	csr = &dev_priv->csr;
2807 
2808 	intel_runtime_pm_get(dev_priv);
2809 
2810 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2811 	seq_printf(m, "path: %s\n", csr->fw_path);
2812 
2813 	if (!csr->dmc_payload)
2814 		goto out;
2815 
2816 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2817 		   CSR_VERSION_MINOR(csr->version));
2818 
2819 	if (IS_SKYLAKE(dev) && csr->version >= CSR_VERSION(1, 6)) {
2820 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2821 			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
2822 		seq_printf(m, "DC5 -> DC6 count: %d\n",
2823 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2824 	} else if (IS_BROXTON(dev) && csr->version >= CSR_VERSION(1, 4)) {
2825 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2826 			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
2827 	}
2828 
2829 out:
2830 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2831 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2832 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2833 
2834 	intel_runtime_pm_put(dev_priv);
2835 
2836 	return 0;
2837 }
2838 
2839 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2840 				 struct drm_display_mode *mode)
2841 {
2842 	int i;
2843 
2844 	for (i = 0; i < tabs; i++)
2845 		seq_putc(m, '\t');
2846 
2847 	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2848 		   mode->base.id, mode->name,
2849 		   mode->vrefresh, mode->clock,
2850 		   mode->hdisplay, mode->hsync_start,
2851 		   mode->hsync_end, mode->htotal,
2852 		   mode->vdisplay, mode->vsync_start,
2853 		   mode->vsync_end, mode->vtotal,
2854 		   mode->type, mode->flags);
2855 }
2856 
2857 static void intel_encoder_info(struct seq_file *m,
2858 			       struct intel_crtc *intel_crtc,
2859 			       struct intel_encoder *intel_encoder)
2860 {
2861 	struct drm_info_node *node = m->private;
2862 	struct drm_device *dev = node->minor->dev;
2863 	struct drm_crtc *crtc = &intel_crtc->base;
2864 	struct intel_connector *intel_connector;
2865 	struct drm_encoder *encoder;
2866 
2867 	encoder = &intel_encoder->base;
2868 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2869 		   encoder->base.id, encoder->name);
2870 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2871 		struct drm_connector *connector = &intel_connector->base;
2872 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2873 			   connector->base.id,
2874 			   connector->name,
2875 			   drm_get_connector_status_name(connector->status));
2876 		if (connector->status == connector_status_connected) {
2877 			struct drm_display_mode *mode = &crtc->mode;
2878 			seq_printf(m, ", mode:\n");
2879 			intel_seq_print_mode(m, 2, mode);
2880 		} else {
2881 			seq_putc(m, '\n');
2882 		}
2883 	}
2884 }
2885 
2886 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2887 {
2888 	struct drm_info_node *node = m->private;
2889 	struct drm_device *dev = node->minor->dev;
2890 	struct drm_crtc *crtc = &intel_crtc->base;
2891 	struct intel_encoder *intel_encoder;
2892 	struct drm_plane_state *plane_state = crtc->primary->state;
2893 	struct drm_framebuffer *fb = plane_state->fb;
2894 
2895 	if (fb)
2896 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2897 			   fb->base.id, plane_state->src_x >> 16,
2898 			   plane_state->src_y >> 16, fb->width, fb->height);
2899 	else
2900 		seq_puts(m, "\tprimary plane disabled\n");
2901 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2902 		intel_encoder_info(m, intel_crtc, intel_encoder);
2903 }
2904 
2905 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2906 {
2907 	struct drm_display_mode *mode = panel->fixed_mode;
2908 
2909 	seq_printf(m, "\tfixed mode:\n");
2910 	intel_seq_print_mode(m, 2, mode);
2911 }
2912 
2913 static void intel_dp_info(struct seq_file *m,
2914 			  struct intel_connector *intel_connector)
2915 {
2916 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2917 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2918 
2919 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2920 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2921 	if (intel_encoder->type == INTEL_OUTPUT_EDP)
2922 		intel_panel_info(m, &intel_connector->panel);
2923 }
2924 
2925 static void intel_hdmi_info(struct seq_file *m,
2926 			    struct intel_connector *intel_connector)
2927 {
2928 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2929 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2930 
2931 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2932 }
2933 
2934 static void intel_lvds_info(struct seq_file *m,
2935 			    struct intel_connector *intel_connector)
2936 {
2937 	intel_panel_info(m, &intel_connector->panel);
2938 }
2939 
2940 static void intel_connector_info(struct seq_file *m,
2941 				 struct drm_connector *connector)
2942 {
2943 	struct intel_connector *intel_connector = to_intel_connector(connector);
2944 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2945 	struct drm_display_mode *mode;
2946 
2947 	seq_printf(m, "connector %d: type %s, status: %s\n",
2948 		   connector->base.id, connector->name,
2949 		   drm_get_connector_status_name(connector->status));
2950 	if (connector->status == connector_status_connected) {
2951 		seq_printf(m, "\tname: %s\n", connector->display_info.name);
2952 		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2953 			   connector->display_info.width_mm,
2954 			   connector->display_info.height_mm);
2955 		seq_printf(m, "\tsubpixel order: %s\n",
2956 			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2957 		seq_printf(m, "\tCEA rev: %d\n",
2958 			   connector->display_info.cea_rev);
2959 	}
2960 	if (intel_encoder) {
2961 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2962 		    intel_encoder->type == INTEL_OUTPUT_EDP)
2963 			intel_dp_info(m, intel_connector);
2964 		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2965 			intel_hdmi_info(m, intel_connector);
2966 		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2967 			intel_lvds_info(m, intel_connector);
2968 	}
2969 
2970 	seq_printf(m, "\tmodes:\n");
2971 	list_for_each_entry(mode, &connector->modes, head)
2972 		intel_seq_print_mode(m, 2, mode);
2973 }
2974 
2975 static bool cursor_active(struct drm_device *dev, int pipe)
2976 {
2977 	struct drm_i915_private *dev_priv = dev->dev_private;
2978 	u32 state;
2979 
2980 	if (IS_845G(dev) || IS_I865G(dev))
2981 		state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
2982 	else
2983 		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2984 
2985 	return state;
2986 }
2987 
2988 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2989 {
2990 	struct drm_i915_private *dev_priv = dev->dev_private;
2991 	u32 pos;
2992 
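	/* CURPOS holds sign-magnitude X/Y; convert to signed coordinates. */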
2993 	pos = I915_READ(CURPOS(pipe));
2994 
2995 	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2996 	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2997 		*x = -*x;
2998 
2999 	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
3000 	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
3001 		*y = -*y;
3002 
3003 	return cursor_active(dev, pipe);
3004 }
3005 
3006 static const char *plane_type(enum drm_plane_type type)
3007 {
3008 	switch (type) {
3009 	case DRM_PLANE_TYPE_OVERLAY:
3010 		return "OVL";
3011 	case DRM_PLANE_TYPE_PRIMARY:
3012 		return "PRI";
3013 	case DRM_PLANE_TYPE_CURSOR:
3014 		return "CUR";
3015 	/*
3016 	 * Deliberately omitting default: to generate compiler warnings
3017 	 * when a new drm_plane_type gets added.
3018 	 */
3019 	}
3020 
3021 	return "unknown";
3022 }
3023 
3024 static const char *plane_rotation(unsigned int rotation)
3025 {
3026 	static char buf[48];
3027 	/*
3028 	 * According to the docs only one DRM_ROTATE_ value may be set at a
3029 	 * time, but print them all so any misuse of the bits is visible.
3030 	 */
3031 	snprintf(buf, sizeof(buf),
3032 		 "%s%s%s%s%s%s(0x%08x)",
3033 		 (rotation & BIT(DRM_ROTATE_0)) ? "0 " : "",
3034 		 (rotation & BIT(DRM_ROTATE_90)) ? "90 " : "",
3035 		 (rotation & BIT(DRM_ROTATE_180)) ? "180 " : "",
3036 		 (rotation & BIT(DRM_ROTATE_270)) ? "270 " : "",
3037 		 (rotation & BIT(DRM_REFLECT_X)) ? "FLIPX " : "",
3038 		 (rotation & BIT(DRM_REFLECT_Y)) ? "FLIPY " : "",
3039 		 rotation);
3040 
3041 	return buf;
3042 }
3043 
3044 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3045 {
3046 	struct drm_info_node *node = m->private;
3047 	struct drm_device *dev = node->minor->dev;
3048 	struct intel_plane *intel_plane;
3049 
3050 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3051 		struct drm_plane_state *state;
3052 		struct drm_plane *plane = &intel_plane->base;
3053 
3054 		if (!plane->state) {
3055 			seq_puts(m, "plane->state is NULL!\n");
3056 			continue;
3057 		}
3058 
3059 		state = plane->state;
3060 
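		/*
		 * src_* coordinates are 16.16 fixed point; x * 15625 >> 10 is
		 * x * 1000000 / 65536, i.e. the fraction in millionths.
		 */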
3061 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3062 			   plane->base.id,
3063 			   plane_type(intel_plane->base.type),
3064 			   state->crtc_x, state->crtc_y,
3065 			   state->crtc_w, state->crtc_h,
3066 			   (state->src_x >> 16),
3067 			   ((state->src_x & 0xffff) * 15625) >> 10,
3068 			   (state->src_y >> 16),
3069 			   ((state->src_y & 0xffff) * 15625) >> 10,
3070 			   (state->src_w >> 16),
3071 			   ((state->src_w & 0xffff) * 15625) >> 10,
3072 			   (state->src_h >> 16),
3073 			   ((state->src_h & 0xffff) * 15625) >> 10,
3074 			   state->fb ? drm_get_format_name(state->fb->pixel_format) : "N/A",
3075 			   plane_rotation(state->rotation));
3076 	}
3077 }
3078 
3079 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3080 {
3081 	struct intel_crtc_state *pipe_config;
3082 	int num_scalers = intel_crtc->num_scalers;
3083 	int i;
3084 
3085 	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3086 
3087 	/* Not all platforms have a scaler */
3088 	if (num_scalers) {
3089 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3090 			   num_scalers,
3091 			   pipe_config->scaler_state.scaler_users,
3092 			   pipe_config->scaler_state.scaler_id);
3093 
3094 		for (i = 0; i < SKL_NUM_SCALERS; i++) {
3095 			struct intel_scaler *sc =
3096 					&pipe_config->scaler_state.scalers[i];
3097 
3098 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3099 				   i, yesno(sc->in_use), sc->mode);
3100 		}
3101 		seq_puts(m, "\n");
3102 	} else {
3103 		seq_puts(m, "\tNo scalers available on this platform\n");
3104 	}
3105 }
3106 
3107 static int i915_display_info(struct seq_file *m, void *unused)
3108 {
3109 	struct drm_info_node *node = m->private;
3110 	struct drm_device *dev = node->minor->dev;
3111 	struct drm_i915_private *dev_priv = dev->dev_private;
3112 	struct intel_crtc *crtc;
3113 	struct drm_connector *connector;
3114 
3115 	intel_runtime_pm_get(dev_priv);
3116 	drm_modeset_lock_all(dev);
3117 	seq_puts(m, "CRTC info\n");
3118 	seq_puts(m, "---------\n");
3119 	for_each_intel_crtc(dev, crtc) {
3120 		bool active;
3121 		struct intel_crtc_state *pipe_config;
3122 		int x, y;
3123 
3124 		pipe_config = to_intel_crtc_state(crtc->base.state);
3125 
3126 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3127 			   crtc->base.base.id, pipe_name(crtc->pipe),
3128 			   yesno(pipe_config->base.active),
3129 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3130 			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3131 
3132 		if (pipe_config->base.active) {
3133 			intel_crtc_info(m, crtc);
3134 
3135 			active = cursor_position(dev, crtc->pipe, &x, &y);
3136 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
3137 				   yesno(crtc->cursor_base),
3138 				   x, y, crtc->base.cursor->state->crtc_w,
3139 				   crtc->base.cursor->state->crtc_h,
3140 				   crtc->cursor_addr, yesno(active));
3141 			intel_scaler_info(m, crtc);
3142 			intel_plane_info(m, crtc);
3143 		}
3144 
3145 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
3146 			   yesno(!crtc->cpu_fifo_underrun_disabled),
3147 			   yesno(!crtc->pch_fifo_underrun_disabled));
3148 	}
3149 
3150 	seq_putc(m, '\n');
3151 	seq_puts(m, "Connector info\n");
3152 	seq_puts(m, "--------------\n");
3153 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3154 		intel_connector_info(m, connector);
3155 	}
3156 	drm_modeset_unlock_all(dev);
3157 	intel_runtime_pm_put(dev_priv);
3158 
3159 	return 0;
3160 }
3161 
3162 static int i915_semaphore_status(struct seq_file *m, void *unused)
3163 {
3164 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3165 	struct drm_device *dev = node->minor->dev;
3166 	struct drm_i915_private *dev_priv = dev->dev_private;
3167 	struct intel_engine_cs *engine;
3168 	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
3169 	enum intel_engine_id id;
3170 	int j, ret;
3171 
3172 	if (!i915_semaphore_is_enabled(dev)) {
3173 		seq_puts(m, "Semaphores are disabled\n");
3174 		return 0;
3175 	}
3176 
3177 	ret = mutex_lock_interruptible(&dev->struct_mutex);
3178 	if (ret)
3179 		return ret;
3180 	intel_runtime_pm_get(dev_priv);
3181 
3182 	if (IS_BROADWELL(dev)) {
3183 		struct page *page;
3184 		uint64_t *seqno;
3185 
3186 		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
3187 
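		/*
		 * The BDW semaphore page stores one 64-bit seqno per
		 * (signaller, waiter) engine pair: signal slots at
		 * id * I915_NUM_ENGINES + j, wait slots at id + j * I915_NUM_ENGINES.
		 */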
3188 		seqno = (uint64_t *)kmap_atomic(page);
3189 		for_each_engine_id(engine, dev_priv, id) {
3190 			uint64_t offset;
3191 
3192 			seq_printf(m, "%s\n", engine->name);
3193 
3194 			seq_puts(m, "  Last signal:");
3195 			for (j = 0; j < num_rings; j++) {
3196 				offset = id * I915_NUM_ENGINES + j;
3197 				seq_printf(m, "0x%08llx (0x%02llx) ",
3198 					   seqno[offset], offset * 8);
3199 			}
3200 			seq_putc(m, '\n');
3201 
3202 			seq_puts(m, "  Last wait:  ");
3203 			for (j = 0; j < num_rings; j++) {
3204 				offset = id + (j * I915_NUM_ENGINES);
3205 				seq_printf(m, "0x%08llx (0x%02llx) ",
3206 					   seqno[offset], offset * 8);
3207 			}
3208 			seq_putc(m, '\n');
3209 
3210 		}
3211 		kunmap_atomic(seqno);
3212 	} else {
3213 		seq_puts(m, "  Last signal:");
3214 		for_each_engine(engine, dev_priv)
3215 			for (j = 0; j < num_rings; j++)
3216 				seq_printf(m, "0x%08x\n",
3217 					   I915_READ(engine->semaphore.mbox.signal[j]));
3218 		seq_putc(m, '\n');
3219 	}
3220 
3221 	seq_puts(m, "\nSync seqno:\n");
3222 	for_each_engine(engine, dev_priv) {
3223 		for (j = 0; j < num_rings; j++)
3224 			seq_printf(m, "  0x%08x ",
3225 				   engine->semaphore.sync_seqno[j]);
3226 		seq_putc(m, '\n');
3227 	}
3228 	seq_putc(m, '\n');
3229 
3230 	intel_runtime_pm_put(dev_priv);
3231 	mutex_unlock(&dev->struct_mutex);
3232 	return 0;
3233 }
3234 
3235 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3236 {
3237 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3238 	struct drm_device *dev = node->minor->dev;
3239 	struct drm_i915_private *dev_priv = dev->dev_private;
3240 	int i;
3241 
3242 	drm_modeset_lock_all(dev);
3243 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3244 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3245 
3246 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
3247 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3248 			   pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
3249 		seq_printf(m, " tracked hardware state:\n");
3250 		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
3251 		seq_printf(m, " dpll_md: 0x%08x\n",
3252 			   pll->config.hw_state.dpll_md);
3253 		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
3254 		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
3255 		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
3256 	}
3257 	drm_modeset_unlock_all(dev);
3258 
3259 	return 0;
3260 }
3261 
3262 static int i915_wa_registers(struct seq_file *m, void *unused)
3263 {
3264 	int i;
3265 	int ret;
3266 	struct intel_engine_cs *engine;
3267 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3268 	struct drm_device *dev = node->minor->dev;
3269 	struct drm_i915_private *dev_priv = dev->dev_private;
3270 	struct i915_workarounds *workarounds = &dev_priv->workarounds;
3271 	enum intel_engine_id id;
3272 
3273 	ret = mutex_lock_interruptible(&dev->struct_mutex);
3274 	if (ret)
3275 		return ret;
3276 
3277 	intel_runtime_pm_get(dev_priv);
3278 
3279 	seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
3280 	for_each_engine_id(engine, dev_priv, id)
3281 		seq_printf(m, "HW whitelist count for %s: %d\n",
3282 			   engine->name, workarounds->hw_whitelist_count[id]);
3283 	for (i = 0; i < workarounds->count; ++i) {
3284 		i915_reg_t addr;
3285 		u32 mask, value, read;
3286 		bool ok;
3287 
3288 		addr = workarounds->reg[i].addr;
3289 		mask = workarounds->reg[i].mask;
3290 		value = workarounds->reg[i].value;
3291 		read = I915_READ(addr);
3292 		ok = (value & mask) == (read & mask);
3293 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
3294 			   i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL");
3295 	}
3296 
3297 	intel_runtime_pm_put(dev_priv);
3298 	mutex_unlock(&dev->struct_mutex);
3299 
3300 	return 0;
3301 }
3302 
3303 static int i915_ddb_info(struct seq_file *m, void *unused)
3304 {
3305 	struct drm_info_node *node = m->private;
3306 	struct drm_device *dev = node->minor->dev;
3307 	struct drm_i915_private *dev_priv = dev->dev_private;
3308 	struct skl_ddb_allocation *ddb;
3309 	struct skl_ddb_entry *entry;
3310 	enum pipe pipe;
3311 	int plane;
3312 
3313 	if (INTEL_INFO(dev)->gen < 9)
3314 		return 0;
3315 
3316 	drm_modeset_lock_all(dev);
3317 
3318 	ddb = &dev_priv->wm.skl_hw.ddb;
3319 
3320 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3321 
3322 	for_each_pipe(dev_priv, pipe) {
3323 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3324 
3325 		for_each_plane(dev_priv, pipe, plane) {
3326 			entry = &ddb->plane[pipe][plane];
3327 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3328 				   entry->start, entry->end,
3329 				   skl_ddb_entry_size(entry));
3330 		}
3331 
3332 		entry = &ddb->plane[pipe][PLANE_CURSOR];
3333 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3334 			   entry->end, skl_ddb_entry_size(entry));
3335 	}
3336 
3337 	drm_modeset_unlock_all(dev);
3338 
3339 	return 0;
3340 }
3341 
3342 static void drrs_status_per_crtc(struct seq_file *m,
3343 		struct drm_device *dev, struct intel_crtc *intel_crtc)
3344 {
3345 	struct intel_encoder *intel_encoder;
3346 	struct drm_i915_private *dev_priv = dev->dev_private;
3347 	struct i915_drrs *drrs = &dev_priv->drrs;
3348 	int vrefresh = 0;
3349 
3350 	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
3351 		/* Encoder connected on this CRTC */
3352 		switch (intel_encoder->type) {
3353 		case INTEL_OUTPUT_EDP:
3354 			seq_puts(m, "eDP:\n");
3355 			break;
3356 		case INTEL_OUTPUT_DSI:
3357 			seq_puts(m, "DSI:\n");
3358 			break;
3359 		case INTEL_OUTPUT_HDMI:
3360 			seq_puts(m, "HDMI:\n");
3361 			break;
3362 		case INTEL_OUTPUT_DISPLAYPORT:
3363 			seq_puts(m, "DP:\n");
3364 			break;
3365 		default:
3366 			seq_printf(m, "Other encoder (type=%d).\n",
3367 						intel_encoder->type);
3368 			return;
3369 		}
3370 	}
3371 
3372 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3373 		seq_puts(m, "\tVBT: DRRS_type: Static");
3374 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3375 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3376 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3377 		seq_puts(m, "\tVBT: DRRS_type: None");
3378 	else
3379 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3380 
3381 	seq_puts(m, "\n\n");
3382 
3383 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3384 		struct intel_panel *panel;
3385 
3386 		mutex_lock(&drrs->mutex);
3387 		/* DRRS Supported */
3388 		seq_puts(m, "\tDRRS Supported: Yes\n");
3389 
3390 		/* disable_drrs() will make drrs->dp NULL */
3391 		if (!drrs->dp) {
3392 			seq_puts(m, "Idleness DRRS: Disabled");
3393 			mutex_unlock(&drrs->mutex);
3394 			return;
3395 		}
3396 
3397 		panel = &drrs->dp->attached_connector->panel;
3398 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3399 					drrs->busy_frontbuffer_bits);
3400 
3401 		seq_puts(m, "\n\t\t");
3402 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3403 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3404 			vrefresh = panel->fixed_mode->vrefresh;
3405 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3406 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3407 			vrefresh = panel->downclock_mode->vrefresh;
3408 		} else {
3409 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3410 						drrs->refresh_rate_type);
3411 			mutex_unlock(&drrs->mutex);
3412 			return;
3413 		}
3414 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3415 
3416 		seq_puts(m, "\n\t\t");
3417 		mutex_unlock(&drrs->mutex);
3418 	} else {
3419 		/* DRRS not supported. Print the VBT parameter. */
3420 		seq_puts(m, "\tDRRS Supported: No");
3421 	}
3422 	seq_puts(m, "\n");
3423 }
3424 
3425 static int i915_drrs_status(struct seq_file *m, void *unused)
3426 {
3427 	struct drm_info_node *node = m->private;
3428 	struct drm_device *dev = node->minor->dev;
3429 	struct intel_crtc *intel_crtc;
3430 	int active_crtc_cnt = 0;
3431 
3432 	for_each_intel_crtc(dev, intel_crtc) {
3433 		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
3434 
3435 		if (intel_crtc->base.state->active) {
3436 			active_crtc_cnt++;
3437 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3438 
3439 			drrs_status_per_crtc(m, dev, intel_crtc);
3440 		}
3441 
3442 		drm_modeset_unlock(&intel_crtc->base.mutex);
3443 	}
3444 
3445 	if (!active_crtc_cnt)
3446 		seq_puts(m, "No active crtc found\n");
3447 
3448 	return 0;
3449 }
3450 
3451 struct pipe_crc_info {
3452 	const char *name;
3453 	struct drm_device *dev;
3454 	enum pipe pipe;
3455 };
3456 
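/* Dump the DP MST topology below every MST-capable source port. */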
3457 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3458 {
3459 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3460 	struct drm_device *dev = node->minor->dev;
3461 	struct drm_encoder *encoder;
3462 	struct intel_encoder *intel_encoder;
3463 	struct intel_digital_port *intel_dig_port;
3464 	drm_modeset_lock_all(dev);
3465 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3466 		intel_encoder = to_intel_encoder(encoder);
3467 		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
3468 			continue;
3469 		intel_dig_port = enc_to_dig_port(encoder);
3470 		if (!intel_dig_port->dp.can_mst)
3471 			continue;
3472 		seq_printf(m, "MST Source Port %c\n",
3473 			   port_name(intel_dig_port->port));
3474 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3475 	}
3476 	drm_modeset_unlock_all(dev);
3477 	return 0;
3478 }
3479 
3480 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
3481 {
3482 	struct pipe_crc_info *info = inode->i_private;
3483 	struct drm_i915_private *dev_priv = info->dev->dev_private;
3484 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3485 
3486 	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
3487 		return -ENODEV;
3488 
3489 	spin_lock_irq(&pipe_crc->lock);
3490 
3491 	if (pipe_crc->opened) {
3492 		spin_unlock_irq(&pipe_crc->lock);
3493 		return -EBUSY; /* already open */
3494 	}
3495 
3496 	pipe_crc->opened = true;
3497 	filep->private_data = inode->i_private;
3498 
3499 	spin_unlock_irq(&pipe_crc->lock);
3500 
3501 	return 0;
3502 }
3503 
3504 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
3505 {
3506 	struct pipe_crc_info *info = inode->i_private;
3507 	struct drm_i915_private *dev_priv = info->dev->dev_private;
3508 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3509 
3510 	spin_lock_irq(&pipe_crc->lock);
3511 	pipe_crc->opened = false;
3512 	spin_unlock_irq(&pipe_crc->lock);
3513 
3514 	return 0;
3515 }
3516 
3517 /* (6 fields, 8 chars each, space separated (5) + '\n') */
3518 #define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
3519 /* account for '\0' */
3520 #define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
3521 
3522 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
3523 {
3524 	assert_spin_locked(&pipe_crc->lock);
3525 	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3526 			INTEL_PIPE_CRC_ENTRIES_NR);
3527 }
3528 
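/*
 * Read CRC results as text, one PIPE_CRC_LINE_LEN-sized line per entry,
 * consuming entries from the per-pipe circular buffer. Blocks until data is
 * available unless the file was opened with O_NONBLOCK.
 */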
3529 static ssize_t
3530 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
3531 		   loff_t *pos)
3532 {
3533 	struct pipe_crc_info *info = filep->private_data;
3534 	struct drm_device *dev = info->dev;
3535 	struct drm_i915_private *dev_priv = dev->dev_private;
3536 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3537 	char buf[PIPE_CRC_BUFFER_LEN];
3538 	int n_entries;
3539 	ssize_t bytes_read;
3540 
3541 	/*
3542 	 * Don't allow user space to provide buffers not big enough to hold
3543 	 * a line of data.
3544 	 */
3545 	if (count < PIPE_CRC_LINE_LEN)
3546 		return -EINVAL;
3547 
3548 	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
3549 		return 0;
3550 
3551 	/* Wait until at least one entry is available (or -EAGAIN if non-blocking). */
3552 	spin_lock_irq(&pipe_crc->lock);
3553 	while (pipe_crc_data_count(pipe_crc) == 0) {
3554 		int ret;
3555 
3556 		if (filep->f_flags & O_NONBLOCK) {
3557 			spin_unlock_irq(&pipe_crc->lock);
3558 			return -EAGAIN;
3559 		}
3560 
3561 		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
3562 				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
3563 		if (ret) {
3564 			spin_unlock_irq(&pipe_crc->lock);
3565 			return ret;
3566 		}
3567 	}
3568 
3569 	/* We now have one or more entries to read */
3570 	n_entries = count / PIPE_CRC_LINE_LEN;
3571 
3572 	bytes_read = 0;
3573 	while (n_entries > 0) {
3574 		struct intel_pipe_crc_entry *entry =
3575 			&pipe_crc->entries[pipe_crc->tail];
3576 		int ret;
3577 
3578 		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3579 			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
3580 			break;
3581 
3582 		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
3583 		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
3584 
3585 		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
3586 				       "%8u %8x %8x %8x %8x %8x\n",
3587 				       entry->frame, entry->crc[0],
3588 				       entry->crc[1], entry->crc[2],
3589 				       entry->crc[3], entry->crc[4]);
3590 
3591 		spin_unlock_irq(&pipe_crc->lock);
3592 
3593 		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
3594 		if (ret)
3595 			return -EFAULT;
3596 
3597 		user_buf += PIPE_CRC_LINE_LEN;
3598 		n_entries--;
3599 
3600 		spin_lock_irq(&pipe_crc->lock);
3601 	}
3602 
3603 	spin_unlock_irq(&pipe_crc->lock);
3604 
3605 	return bytes_read;
3606 }
3607 
3608 static const struct file_operations i915_pipe_crc_fops = {
3609 	.owner = THIS_MODULE,
3610 	.open = i915_pipe_crc_open,
3611 	.read = i915_pipe_crc_read,
3612 	.release = i915_pipe_crc_release,
3613 };
3614 
3615 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
3616 	{
3617 		.name = "i915_pipe_A_crc",
3618 		.pipe = PIPE_A,
3619 	},
3620 	{
3621 		.name = "i915_pipe_B_crc",
3622 		.pipe = PIPE_B,
3623 	},
3624 	{
3625 		.name = "i915_pipe_C_crc",
3626 		.pipe = PIPE_C,
3627 	},
3628 };
3629 
3630 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
3631 				enum pipe pipe)
3632 {
3633 	struct drm_device *dev = minor->dev;
3634 	struct dentry *ent;
3635 	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
3636 
3637 	info->dev = dev;
3638 	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
3639 				  &i915_pipe_crc_fops);
3640 	if (!ent)
3641 		return -ENOMEM;
3642 
3643 	return drm_add_fake_info_node(minor, ent, info);
3644 }
3645 
3646 static const char * const pipe_crc_sources[] = {
3647 	"none",
3648 	"plane1",
3649 	"plane2",
3650 	"pf",
3651 	"pipe",
3652 	"TV",
3653 	"DP-B",
3654 	"DP-C",
3655 	"DP-D",
3656 	"auto",
3657 };
3658 
3659 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
3660 {
3661 	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
3662 	return pipe_crc_sources[source];
3663 }
3664 
3665 static int display_crc_ctl_show(struct seq_file *m, void *data)
3666 {
3667 	struct drm_device *dev = m->private;
3668 	struct drm_i915_private *dev_priv = dev->dev_private;
3669 	int i;
3670 
3671 	for (i = 0; i < I915_MAX_PIPES; i++)
3672 		seq_printf(m, "%c %s\n", pipe_name(i),
3673 			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
3674 
3675 	return 0;
3676 }
3677 
3678 static int display_crc_ctl_open(struct inode *inode, struct file *file)
3679 {
3680 	struct drm_device *dev = inode->i_private;
3681 
3682 	return single_open(file, display_crc_ctl_show, dev);
3683 }
3684 
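/*
 * The *_pipe_crc_ctl_reg() helpers below translate a requested CRC source
 * into a PIPE_CRC_CTL register value for the given platform, resolving the
 * "auto" source to a concrete one first.
 */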
3685 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3686 				 uint32_t *val)
3687 {
3688 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3689 		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3690 
3691 	switch (*source) {
3692 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3693 		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
3694 		break;
3695 	case INTEL_PIPE_CRC_SOURCE_NONE:
3696 		*val = 0;
3697 		break;
3698 	default:
3699 		return -EINVAL;
3700 	}
3701 
3702 	return 0;
3703 }
3704 
3705 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
3706 				     enum intel_pipe_crc_source *source)
3707 {
3708 	struct intel_encoder *encoder;
3709 	struct intel_crtc *crtc;
3710 	struct intel_digital_port *dig_port;
3711 	int ret = 0;
3712 
3713 	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3714 
3715 	drm_modeset_lock_all(dev);
3716 	for_each_intel_encoder(dev, encoder) {
3717 		if (!encoder->base.crtc)
3718 			continue;
3719 
3720 		crtc = to_intel_crtc(encoder->base.crtc);
3721 
3722 		if (crtc->pipe != pipe)
3723 			continue;
3724 
3725 		switch (encoder->type) {
3726 		case INTEL_OUTPUT_TVOUT:
3727 			*source = INTEL_PIPE_CRC_SOURCE_TV;
3728 			break;
3729 		case INTEL_OUTPUT_DISPLAYPORT:
3730 		case INTEL_OUTPUT_EDP:
3731 			dig_port = enc_to_dig_port(&encoder->base);
3732 			switch (dig_port->port) {
3733 			case PORT_B:
3734 				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
3735 				break;
3736 			case PORT_C:
3737 				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
3738 				break;
3739 			case PORT_D:
3740 				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
3741 				break;
3742 			default:
3743 				WARN(1, "nonexisting DP port %c\n",
3744 				     port_name(dig_port->port));
3745 				break;
3746 			}
3747 			break;
3748 		default:
3749 			break;
3750 		}
3751 	}
3752 	drm_modeset_unlock_all(dev);
3753 
3754 	return ret;
3755 }
3756 
3757 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
3758 				enum pipe pipe,
3759 				enum intel_pipe_crc_source *source,
3760 				uint32_t *val)
3761 {
3762 	struct drm_i915_private *dev_priv = dev->dev_private;
3763 	bool need_stable_symbols = false;
3764 
3765 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3766 		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3767 		if (ret)
3768 			return ret;
3769 	}
3770 
3771 	switch (*source) {
3772 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3773 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
3774 		break;
3775 	case INTEL_PIPE_CRC_SOURCE_DP_B:
3776 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
3777 		need_stable_symbols = true;
3778 		break;
3779 	case INTEL_PIPE_CRC_SOURCE_DP_C:
3780 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
3781 		need_stable_symbols = true;
3782 		break;
3783 	case INTEL_PIPE_CRC_SOURCE_DP_D:
3784 		if (!IS_CHERRYVIEW(dev))
3785 			return -EINVAL;
3786 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
3787 		need_stable_symbols = true;
3788 		break;
3789 	case INTEL_PIPE_CRC_SOURCE_NONE:
3790 		*val = 0;
3791 		break;
3792 	default:
3793 		return -EINVAL;
3794 	}
3795 
3796 	/*
3797 	 * When the pipe CRC tap point is after the transcoders we need
3798 	 * to tweak symbol-level features to produce a deterministic series of
3799 	 * symbols for a given frame. We need to reset those features only once
3800 	 * a frame (instead of every nth symbol):
3801 	 *   - DC-balance: used to ensure a better clock recovery from the data
3802 	 *     link (SDVO)
3803 	 *   - DisplayPort scrambling: used for EMI reduction
3804 	 */
3805 	if (need_stable_symbols) {
3806 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3807 
3808 		tmp |= DC_BALANCE_RESET_VLV;
3809 		switch (pipe) {
3810 		case PIPE_A:
3811 			tmp |= PIPE_A_SCRAMBLE_RESET;
3812 			break;
3813 		case PIPE_B:
3814 			tmp |= PIPE_B_SCRAMBLE_RESET;
3815 			break;
3816 		case PIPE_C:
3817 			tmp |= PIPE_C_SCRAMBLE_RESET;
3818 			break;
3819 		default:
3820 			return -EINVAL;
3821 		}
3822 		I915_WRITE(PORT_DFT2_G4X, tmp);
3823 	}
3824 
3825 	return 0;
3826 }
3827 
3828 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
3829 				 enum pipe pipe,
3830 				 enum intel_pipe_crc_source *source,
3831 				 uint32_t *val)
3832 {
3833 	struct drm_i915_private *dev_priv = dev->dev_private;
3834 	bool need_stable_symbols = false;
3835 
3836 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3837 		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3838 		if (ret)
3839 			return ret;
3840 	}
3841 
3842 	switch (*source) {
3843 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3844 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
3845 		break;
3846 	case INTEL_PIPE_CRC_SOURCE_TV:
3847 		if (!SUPPORTS_TV(dev))
3848 			return -EINVAL;
3849 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
3850 		break;
3851 	case INTEL_PIPE_CRC_SOURCE_DP_B:
3852 		if (!IS_G4X(dev))
3853 			return -EINVAL;
3854 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
3855 		need_stable_symbols = true;
3856 		break;
3857 	case INTEL_PIPE_CRC_SOURCE_DP_C:
3858 		if (!IS_G4X(dev))
3859 			return -EINVAL;
3860 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
3861 		need_stable_symbols = true;
3862 		break;
3863 	case INTEL_PIPE_CRC_SOURCE_DP_D:
3864 		if (!IS_G4X(dev))
3865 			return -EINVAL;
3866 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
3867 		need_stable_symbols = true;
3868 		break;
3869 	case INTEL_PIPE_CRC_SOURCE_NONE:
3870 		*val = 0;
3871 		break;
3872 	default:
3873 		return -EINVAL;
3874 	}
3875 
3876 	/*
3877 	 * When the pipe CRC tap point is after the transcoders we need
3878 	 * to tweak symbol-level features to produce a deterministic series of
3879 	 * symbols for a given frame. We need to reset those features only once
3880 	 * a frame (instead of every nth symbol):
3881 	 *   - DC-balance: used to ensure a better clock recovery from the data
3882 	 *     link (SDVO)
3883 	 *   - DisplayPort scrambling: used for EMI reduction
3884 	 */
3885 	if (need_stable_symbols) {
3886 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3887 
3888 		WARN_ON(!IS_G4X(dev));
3889 
3890 		I915_WRITE(PORT_DFT_I9XX,
3891 			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
3892 
3893 		if (pipe == PIPE_A)
3894 			tmp |= PIPE_A_SCRAMBLE_RESET;
3895 		else
3896 			tmp |= PIPE_B_SCRAMBLE_RESET;
3897 
3898 		I915_WRITE(PORT_DFT2_G4X, tmp);
3899 	}
3900 
3901 	return 0;
3902 }
3903 
3904 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3905 					 enum pipe pipe)
3906 {
3907 	struct drm_i915_private *dev_priv = dev->dev_private;
3908 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3909 
3910 	switch (pipe) {
3911 	case PIPE_A:
3912 		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3913 		break;
3914 	case PIPE_B:
3915 		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3916 		break;
3917 	case PIPE_C:
3918 		tmp &= ~PIPE_C_SCRAMBLE_RESET;
3919 		break;
3920 	default:
3921 		return;
3922 	}
3923 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
3924 		tmp &= ~DC_BALANCE_RESET_VLV;
3925 	I915_WRITE(PORT_DFT2_G4X, tmp);
3927 }
3928 
3929 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
3930 					 enum pipe pipe)
3931 {
3932 	struct drm_i915_private *dev_priv = dev->dev_private;
3933 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3934 
3935 	if (pipe == PIPE_A)
3936 		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3937 	else
3938 		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3939 	I915_WRITE(PORT_DFT2_G4X, tmp);
3940 
3941 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
3942 		I915_WRITE(PORT_DFT_I9XX,
3943 			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
3944 	}
3945 }
3946 
3947 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3948 				uint32_t *val)
3949 {
3950 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3951 		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3952 
3953 	switch (*source) {
3954 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3955 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
3956 		break;
3957 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3958 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
3959 		break;
3960 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3961 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
3962 		break;
3963 	case INTEL_PIPE_CRC_SOURCE_NONE:
3964 		*val = 0;
3965 		break;
3966 	default:
3967 		return -EINVAL;
3968 	}
3969 
3970 	return 0;
3971 }
3972 
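/*
 * HSW pipe A CRC workaround: toggle pch_pfit.force_thru through a full
 * atomic commit when pipe A is driven by the eDP transcoder. Enabled when
 * selecting the PF CRC source and undone when switching back to "none".
 */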
3973 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
3974 {
3975 	struct drm_i915_private *dev_priv = dev->dev_private;
3976 	struct intel_crtc *crtc =
3977 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3978 	struct intel_crtc_state *pipe_config;
3979 	struct drm_atomic_state *state;
3980 	int ret = 0;
3981 
3982 	drm_modeset_lock_all(dev);
3983 	state = drm_atomic_state_alloc(dev);
3984 	if (!state) {
3985 		ret = -ENOMEM;
3986 		goto out;
3987 	}
3988 
3989 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
3990 	pipe_config = intel_atomic_get_crtc_state(state, crtc);
3991 	if (IS_ERR(pipe_config)) {
3992 		ret = PTR_ERR(pipe_config);
3993 		goto out;
3994 	}
3995 
3996 	pipe_config->pch_pfit.force_thru = enable;
3997 	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
3998 	    pipe_config->pch_pfit.enabled != enable)
3999 		pipe_config->base.connectors_changed = true;
4000 
4001 	ret = drm_atomic_commit(state);
4002 out:
4003 	drm_modeset_unlock_all(dev);
4004 	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
4005 	if (ret)
4006 		drm_atomic_state_free(state);
4007 }
4008 
4009 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
4010 				enum pipe pipe,
4011 				enum intel_pipe_crc_source *source,
4012 				uint32_t *val)
4013 {
4014 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
4015 		*source = INTEL_PIPE_CRC_SOURCE_PF;
4016 
4017 	switch (*source) {
4018 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
4019 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
4020 		break;
4021 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
4022 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
4023 		break;
4024 	case INTEL_PIPE_CRC_SOURCE_PF:
4025 		if (IS_HASWELL(dev) && pipe == PIPE_A)
4026 			hsw_trans_edp_pipe_A_crc_wa(dev, true);
4027 
4028 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
4029 		break;
4030 	case INTEL_PIPE_CRC_SOURCE_NONE:
4031 		*val = 0;
4032 		break;
4033 	default:
4034 		return -EINVAL;
4035 	}
4036 
4037 	return 0;
4038 }
4039 
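/*
 * Switch the CRC source for a pipe. Only none -> source and source -> none
 * transitions are allowed. Enabling allocates the entry buffer and disables
 * IPS; disabling waits for a vblank, frees the buffer and undoes any
 * platform workarounds (scrambling reset, HSW pipe A force_thru).
 */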
4040 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4041 			       enum intel_pipe_crc_source source)
4042 {
4043 	struct drm_i915_private *dev_priv = dev->dev_private;
4044 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
4045 	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
4046 									pipe));
4047 	enum intel_display_power_domain power_domain;
4048 	u32 val = 0; /* shut up gcc */
4049 	int ret;
4050 
4051 	if (pipe_crc->source == source)
4052 		return 0;
4053 
4054 	/* forbid changing the source without going back to 'none' */
4055 	if (pipe_crc->source && source)
4056 		return -EINVAL;
4057 
4058 	power_domain = POWER_DOMAIN_PIPE(pipe);
4059 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
4060 		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
4061 		return -EIO;
4062 	}
4063 
4064 	if (IS_GEN2(dev))
4065 		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
4066 	else if (INTEL_INFO(dev)->gen < 5)
4067 		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4068 	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4069 		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4070 	else if (IS_GEN5(dev) || IS_GEN6(dev))
4071 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
4072 	else
4073 		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
4074 
4075 	if (ret != 0)
4076 		goto out;
4077 
4078 	/* none -> real source transition */
4079 	if (source) {
4080 		struct intel_pipe_crc_entry *entries;
4081 
4082 		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
4083 				 pipe_name(pipe), pipe_crc_source_name(source));
4084 
4085 		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
4086 				  sizeof(pipe_crc->entries[0]),
4087 				  GFP_KERNEL);
4088 		if (!entries) {
4089 			ret = -ENOMEM;
4090 			goto out;
4091 		}
4092 
4093 		/*
4094 		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
4095 		 * enabled and disabled dynamically based on package C states,
4096 		 * user space can't make reliable use of the CRCs, so let's just
4097 		 * completely disable it.
4098 		 */
4099 		hsw_disable_ips(crtc);
4100 
4101 		spin_lock_irq(&pipe_crc->lock);
4102 		kfree(pipe_crc->entries);
4103 		pipe_crc->entries = entries;
4104 		pipe_crc->head = 0;
4105 		pipe_crc->tail = 0;
4106 		spin_unlock_irq(&pipe_crc->lock);
4107 	}
4108 
4109 	pipe_crc->source = source;
4110 
4111 	I915_WRITE(PIPE_CRC_CTL(pipe), val);
4112 	POSTING_READ(PIPE_CRC_CTL(pipe));
4113 
4114 	/* real source -> none transition */
4115 	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
4116 		struct intel_pipe_crc_entry *entries;
4117 		struct intel_crtc *crtc =
4118 			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
4119 
4120 		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
4121 				 pipe_name(pipe));
4122 
4123 		drm_modeset_lock(&crtc->base.mutex, NULL);
4124 		if (crtc->base.state->active)
4125 			intel_wait_for_vblank(dev, pipe);
4126 		drm_modeset_unlock(&crtc->base.mutex);
4127 
4128 		spin_lock_irq(&pipe_crc->lock);
4129 		entries = pipe_crc->entries;
4130 		pipe_crc->entries = NULL;
4131 		pipe_crc->head = 0;
4132 		pipe_crc->tail = 0;
4133 		spin_unlock_irq(&pipe_crc->lock);
4134 
4135 		kfree(entries);
4136 
4137 		if (IS_G4X(dev))
4138 			g4x_undo_pipe_scramble_reset(dev, pipe);
4139 		else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
4140 			vlv_undo_pipe_scramble_reset(dev, pipe);
4141 		else if (IS_HASWELL(dev) && pipe == PIPE_A)
4142 			hsw_trans_edp_pipe_A_crc_wa(dev, false);
4143 
4144 		hsw_enable_ips(crtc);
4145 	}
4146 
4147 	ret = 0;
4148 
4149 out:
4150 	intel_display_power_put(dev_priv, power_domain);
4151 
4152 	return ret;
4153 }
4154 
4155 /*
4156  * Parse pipe CRC command strings:
4157  *   command: wsp* object wsp+ name wsp+ source wsp*
4158  *   object: 'pipe'
4159  *   name: (A | B | C)
4160  *   source: (none | plane1 | plane2 | pf)
4161  *   wsp: (#0x20 | #0x9 | #0xA)+
4162  *
4163  * eg.:
4164  *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
4165  *  "pipe A none"    ->  Stop CRC
4166  */
4167 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
4168 {
4169 	int n_words = 0;
4170 
4171 	while (*buf) {
4172 		char *end;
4173 
4174 		/* skip leading white space */
4175 		buf = skip_spaces(buf);
4176 		if (!*buf)
4177 			break;	/* end of buffer */
4178 
4179 		/* find end of word */
4180 		for (end = buf; *end && !isspace(*end); end++)
4181 			;
4182 
4183 		if (n_words == max_words) {
4184 			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
4185 					 max_words);
4186 			return -EINVAL;	/* ran out of words[] before bytes */
4187 		}
4188 
4189 		if (*end)
4190 			*end++ = '\0';
4191 		words[n_words++] = buf;
4192 		buf = end;
4193 	}
4194 
4195 	return n_words;
4196 }
4197 
4198 enum intel_pipe_crc_object {
4199 	PIPE_CRC_OBJECT_PIPE,
4200 };
4201 
4202 static const char * const pipe_crc_objects[] = {
4203 	"pipe",
4204 };
4205 
4206 static int
4207 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
4208 {
4209 	int i;
4210 
4211 	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
4212 		if (!strcmp(buf, pipe_crc_objects[i])) {
4213 			*o = i;
4214 			return 0;
4215 		}
4216 
4217 	return -EINVAL;
4218 }
4219 
4220 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
4221 {
4222 	const char name = buf[0];
4223 
4224 	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
4225 		return -EINVAL;
4226 
4227 	*pipe = name - 'A';
4228 
4229 	return 0;
4230 }
4231 
4232 static int
4233 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
4234 {
4235 	int i;
4236 
4237 	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
4238 		if (!strcmp(buf, pipe_crc_sources[i])) {
4239 			*s = i;
4240 			return 0;
4241 		}
4242 
4243 	return -EINVAL;
4244 }
4245 
4246 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
4247 {
4248 #define N_WORDS 3
4249 	int n_words;
4250 	char *words[N_WORDS];
4251 	enum pipe pipe;
4252 	enum intel_pipe_crc_object object;
4253 	enum intel_pipe_crc_source source;
4254 
4255 	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
4256 	if (n_words != N_WORDS) {
4257 		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
4258 				 N_WORDS);
4259 		return -EINVAL;
4260 	}
4261 
4262 	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
4263 		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
4264 		return -EINVAL;
4265 	}
4266 
4267 	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
4268 		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
4269 		return -EINVAL;
4270 	}
4271 
4272 	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
4273 		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
4274 		return -EINVAL;
4275 	}
4276 
4277 	return pipe_crc_set_source(dev, pipe, source);
4278 }
4279 
4280 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
4281 				     size_t len, loff_t *offp)
4282 {
4283 	struct seq_file *m = file->private_data;
4284 	struct drm_device *dev = m->private;
4285 	char *tmpbuf;
4286 	int ret;
4287 
4288 	if (len == 0)
4289 		return 0;
4290 
4291 	if (len > PAGE_SIZE - 1) {
4292 		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
4293 				 PAGE_SIZE);
4294 		return -E2BIG;
4295 	}
4296 
4297 	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
4298 	if (!tmpbuf)
4299 		return -ENOMEM;
4300 
4301 	if (copy_from_user(tmpbuf, ubuf, len)) {
4302 		ret = -EFAULT;
4303 		goto out;
4304 	}
4305 	tmpbuf[len] = '\0';
4306 
4307 	ret = display_crc_ctl_parse(dev, tmpbuf, len);
4308 
4309 out:
4310 	kfree(tmpbuf);
4311 	if (ret < 0)
4312 		return ret;
4313 
4314 	*offp += len;
4315 	return len;
4316 }
4317 
4318 static const struct file_operations i915_display_crc_ctl_fops = {
4319 	.owner = THIS_MODULE,
4320 	.open = display_crc_ctl_open,
4321 	.read = seq_read,
4322 	.llseek = seq_lseek,
4323 	.release = single_release,
4324 	.write = display_crc_ctl_write
4325 };
4326 
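/*
 * Writing "1" to the DP test-active debugfs file arms DP compliance test
 * handling on every connected DisplayPort connector; any other value
 * disarms it.
 */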
4327 static ssize_t i915_displayport_test_active_write(struct file *file,
4328 					    const char __user *ubuf,
4329 					    size_t len, loff_t *offp)
4330 {
4331 	char *input_buffer;
4332 	int status = 0;
4333 	struct drm_device *dev;
4334 	struct drm_connector *connector;
4335 	struct list_head *connector_list;
4336 	struct intel_dp *intel_dp;
4337 	int val = 0;
4338 
4339 	dev = ((struct seq_file *)file->private_data)->private;
4340 
4341 	connector_list = &dev->mode_config.connector_list;
4342 
4343 	if (len == 0)
4344 		return 0;
4345 
4346 	input_buffer = kmalloc(len + 1, GFP_KERNEL);
4347 	if (!input_buffer)
4348 		return -ENOMEM;
4349 
4350 	if (copy_from_user(input_buffer, ubuf, len)) {
4351 		status = -EFAULT;
4352 		goto out;
4353 	}
4354 
4355 	input_buffer[len] = '\0';
4356 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
4357 
4358 	list_for_each_entry(connector, connector_list, head) {
4359 
4360 		if (connector->connector_type !=
4361 		    DRM_MODE_CONNECTOR_DisplayPort)
4362 			continue;
4363 
4364 		if (connector->status == connector_status_connected &&
4365 		    connector->encoder != NULL) {
4366 			intel_dp = enc_to_intel_dp(connector->encoder);
4367 			status = kstrtoint(input_buffer, 10, &val);
4368 			if (status < 0)
4369 				goto out;
4370 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
4371 			/* To prevent erroneous activation of the compliance
4372 			 * testing code, only accept an actual value of 1 here.
4373 			 */
4374 			if (val == 1)
4375 				intel_dp->compliance_test_active = 1;
4376 			else
4377 				intel_dp->compliance_test_active = 0;
4378 		}
4379 	}
4380 out:
4381 	kfree(input_buffer);
4382 	if (status < 0)
4383 		return status;
4384 
4385 	*offp += len;
4386 	return len;
4387 }
4388 
4389 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
4390 {
4391 	struct drm_device *dev = m->private;
4392 	struct drm_connector *connector;
4393 	struct list_head *connector_list = &dev->mode_config.connector_list;
4394 	struct intel_dp *intel_dp;
4395 
4396 	list_for_each_entry(connector, connector_list, head) {
4397 
4398 		if (connector->connector_type !=
4399 		    DRM_MODE_CONNECTOR_DisplayPort)
4400 			continue;
4401 
4402 		if (connector->status == connector_status_connected &&
4403 		    connector->encoder != NULL) {
4404 			intel_dp = enc_to_intel_dp(connector->encoder);
4405 			if (intel_dp->compliance_test_active)
4406 				seq_puts(m, "1");
4407 			else
4408 				seq_puts(m, "0");
4409 		} else
4410 			seq_puts(m, "0");
4411 	}
4412 
4413 	return 0;
4414 }
4415 
4416 static int i915_displayport_test_active_open(struct inode *inode,
4417 				       struct file *file)
4418 {
4419 	struct drm_device *dev = inode->i_private;
4420 
4421 	return single_open(file, i915_displayport_test_active_show, dev);
4422 }
4423 
4424 static const struct file_operations i915_displayport_test_active_fops = {
4425 	.owner = THIS_MODULE,
4426 	.open = i915_displayport_test_active_open,
4427 	.read = seq_read,
4428 	.llseek = seq_lseek,
4429 	.release = single_release,
4430 	.write = i915_displayport_test_active_write
4431 };
4432 
4433 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
4434 {
4435 	struct drm_device *dev = m->private;
4436 	struct drm_connector *connector;
4437 	struct list_head *connector_list = &dev->mode_config.connector_list;
4438 	struct intel_dp *intel_dp;
4439 
4440 	list_for_each_entry(connector, connector_list, head) {
4441 
4442 		if (connector->connector_type !=
4443 		    DRM_MODE_CONNECTOR_DisplayPort)
4444 			continue;
4445 
4446 		if (connector->status == connector_status_connected &&
4447 		    connector->encoder != NULL) {
4448 			intel_dp = enc_to_intel_dp(connector->encoder);
4449 			seq_printf(m, "%lx", intel_dp->compliance_test_data);
4450 		} else
4451 			seq_puts(m, "0");
4452 	}
4453 
4454 	return 0;
4455 }

4456 static int i915_displayport_test_data_open(struct inode *inode,
4457 				       struct file *file)
4458 {
4459 	struct drm_device *dev = inode->i_private;
4460 
4461 	return single_open(file, i915_displayport_test_data_show, dev);
4462 }
4463 
4464 static const struct file_operations i915_displayport_test_data_fops = {
4465 	.owner = THIS_MODULE,
4466 	.open = i915_displayport_test_data_open,
4467 	.read = seq_read,
4468 	.llseek = seq_lseek,
4469 	.release = single_release
4470 };
4471 
4472 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
4473 {
4474 	struct drm_device *dev = m->private;
4475 	struct drm_connector *connector;
4476 	struct list_head *connector_list = &dev->mode_config.connector_list;
4477 	struct intel_dp *intel_dp;
4478 
4479 	list_for_each_entry(connector, connector_list, head) {
4480 
4481 		if (connector->connector_type !=
4482 		    DRM_MODE_CONNECTOR_DisplayPort)
4483 			continue;
4484 
4485 		if (connector->status == connector_status_connected &&
4486 		    connector->encoder != NULL) {
4487 			intel_dp = enc_to_intel_dp(connector->encoder);
4488 			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
4489 		} else
4490 			seq_puts(m, "0");
4491 	}
4492 
4493 	return 0;
4494 }
4495 
4496 static int i915_displayport_test_type_open(struct inode *inode,
4497 				       struct file *file)
4498 {
4499 	struct drm_device *dev = inode->i_private;
4500 
4501 	return single_open(file, i915_displayport_test_type_show, dev);
4502 }
4503 
4504 static const struct file_operations i915_displayport_test_type_fops = {
4505 	.owner = THIS_MODULE,
4506 	.open = i915_displayport_test_type_open,
4507 	.read = seq_read,
4508 	.llseek = seq_lseek,
4509 	.release = single_release
4510 };
4511 
4512 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
4513 {
4514 	struct drm_device *dev = m->private;
4515 	int level;
4516 	int num_levels;
4517 
4518 	if (IS_CHERRYVIEW(dev))
4519 		num_levels = 3;
4520 	else if (IS_VALLEYVIEW(dev))
4521 		num_levels = 1;
4522 	else
4523 		num_levels = ilk_wm_max_level(dev) + 1;
4524 
4525 	drm_modeset_lock_all(dev);
4526 
4527 	for (level = 0; level < num_levels; level++) {
4528 		unsigned int latency = wm[level];
4529 
4530 		/*
4531 		 * - WM1+ latency values in 0.5us units
4532 		 * - latencies are in us on gen9/vlv/chv
4533 		 */
4534 		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev) ||
4535 		    IS_CHERRYVIEW(dev))
4536 			latency *= 10;
4537 		else if (level > 0)
4538 			latency *= 5;
4539 
4540 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
4541 			   level, wm[level], latency / 10, latency % 10);
4542 	}
4543 
4544 	drm_modeset_unlock_all(dev);
4545 }
4546 
4547 static int pri_wm_latency_show(struct seq_file *m, void *data)
4548 {
4549 	struct drm_device *dev = m->private;
4550 	struct drm_i915_private *dev_priv = dev->dev_private;
4551 	const uint16_t *latencies;
4552 
4553 	if (INTEL_INFO(dev)->gen >= 9)
4554 		latencies = dev_priv->wm.skl_latency;
4555 	else
4556 		latencies = dev_priv->wm.pri_latency;
4557 
4558 	wm_latency_show(m, latencies);
4559 
4560 	return 0;
4561 }
4562 
4563 static int spr_wm_latency_show(struct seq_file *m, void *data)
4564 {
4565 	struct drm_device *dev = m->private;
4566 	struct drm_i915_private *dev_priv = dev->dev_private;
4567 	const uint16_t *latencies;
4568 
4569 	if (INTEL_INFO(dev)->gen >= 9)
4570 		latencies = dev_priv->wm.skl_latency;
4571 	else
4572 		latencies = dev_priv->wm.spr_latency;
4573 
4574 	wm_latency_show(m, latencies);
4575 
4576 	return 0;
4577 }
4578 
4579 static int cur_wm_latency_show(struct seq_file *m, void *data)
4580 {
4581 	struct drm_device *dev = m->private;
4582 	struct drm_i915_private *dev_priv = dev->dev_private;
4583 	const uint16_t *latencies;
4584 
4585 	if (INTEL_INFO(dev)->gen >= 9)
4586 		latencies = dev_priv->wm.skl_latency;
4587 	else
4588 		latencies = dev_priv->wm.cur_latency;
4589 
4590 	wm_latency_show(m, latencies);
4591 
4592 	return 0;
4593 }
4594 
4595 static int pri_wm_latency_open(struct inode *inode, struct file *file)
4596 {
4597 	struct drm_device *dev = inode->i_private;
4598 
4599 	if (INTEL_INFO(dev)->gen < 5)
4600 		return -ENODEV;
4601 
4602 	return single_open(file, pri_wm_latency_show, dev);
4603 }
4604 
4605 static int spr_wm_latency_open(struct inode *inode, struct file *file)
4606 {
4607 	struct drm_device *dev = inode->i_private;
4608 
4609 	if (HAS_GMCH_DISPLAY(dev))
4610 		return -ENODEV;
4611 
4612 	return single_open(file, spr_wm_latency_show, dev);
4613 }
4614 
4615 static int cur_wm_latency_open(struct inode *inode, struct file *file)
4616 {
4617 	struct drm_device *dev = inode->i_private;
4618 
4619 	if (HAS_GMCH_DISPLAY(dev))
4620 		return -ENODEV;
4621 
4622 	return single_open(file, cur_wm_latency_show, dev);
4623 }
4624 
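/*
 * Parse up to eight space-separated watermark latency values and update the
 * given latency table under the modeset locks. Exactly num_levels values
 * must be supplied.
 */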
4625 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
4626 				size_t len, loff_t *offp, uint16_t wm[8])
4627 {
4628 	struct seq_file *m = file->private_data;
4629 	struct drm_device *dev = m->private;
4630 	uint16_t new[8] = { 0 };
4631 	int num_levels;
4632 	int level;
4633 	int ret;
4634 	char tmp[32];
4635 
4636 	if (IS_CHERRYVIEW(dev))
4637 		num_levels = 3;
4638 	else if (IS_VALLEYVIEW(dev))
4639 		num_levels = 1;
4640 	else
4641 		num_levels = ilk_wm_max_level(dev) + 1;
4642 
4643 	if (len >= sizeof(tmp))
4644 		return -EINVAL;
4645 
4646 	if (copy_from_user(tmp, ubuf, len))
4647 		return -EFAULT;
4648 
4649 	tmp[len] = '\0';
4650 
4651 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
4652 		     &new[0], &new[1], &new[2], &new[3],
4653 		     &new[4], &new[5], &new[6], &new[7]);
4654 	if (ret != num_levels)
4655 		return -EINVAL;
4656 
4657 	drm_modeset_lock_all(dev);
4658 
4659 	for (level = 0; level < num_levels; level++)
4660 		wm[level] = new[level];
4661 
4662 	drm_modeset_unlock_all(dev);
4663 
4664 	return len;
4665 }
4666 
4668 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4669 				    size_t len, loff_t *offp)
4670 {
4671 	struct seq_file *m = file->private_data;
4672 	struct drm_device *dev = m->private;
4673 	struct drm_i915_private *dev_priv = dev->dev_private;
4674 	uint16_t *latencies;
4675 
4676 	if (INTEL_INFO(dev)->gen >= 9)
4677 		latencies = dev_priv->wm.skl_latency;
4678 	else
4679 		latencies = dev_priv->wm.pri_latency;
4680 
4681 	return wm_latency_write(file, ubuf, len, offp, latencies);
4682 }
4683 
4684 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4685 				    size_t len, loff_t *offp)
4686 {
4687 	struct seq_file *m = file->private_data;
4688 	struct drm_device *dev = m->private;
4689 	struct drm_i915_private *dev_priv = dev->dev_private;
4690 	uint16_t *latencies;
4691 
4692 	if (INTEL_INFO(dev)->gen >= 9)
4693 		latencies = dev_priv->wm.skl_latency;
4694 	else
4695 		latencies = dev_priv->wm.spr_latency;
4696 
4697 	return wm_latency_write(file, ubuf, len, offp, latencies);
4698 }
4699 
4700 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4701 				    size_t len, loff_t *offp)
4702 {
4703 	struct seq_file *m = file->private_data;
4704 	struct drm_device *dev = m->private;
4705 	struct drm_i915_private *dev_priv = dev->dev_private;
4706 	uint16_t *latencies;
4707 
4708 	if (INTEL_INFO(dev)->gen >= 9)
4709 		latencies = dev_priv->wm.skl_latency;
4710 	else
4711 		latencies = dev_priv->wm.cur_latency;
4712 
4713 	return wm_latency_write(file, ubuf, len, offp, latencies);
4714 }
4715 
4716 static const struct file_operations i915_pri_wm_latency_fops = {
4717 	.owner = THIS_MODULE,
4718 	.open = pri_wm_latency_open,
4719 	.read = seq_read,
4720 	.llseek = seq_lseek,
4721 	.release = single_release,
4722 	.write = pri_wm_latency_write
4723 };
4724 
4725 static const struct file_operations i915_spr_wm_latency_fops = {
4726 	.owner = THIS_MODULE,
4727 	.open = spr_wm_latency_open,
4728 	.read = seq_read,
4729 	.llseek = seq_lseek,
4730 	.release = single_release,
4731 	.write = spr_wm_latency_write
4732 };
4733 
4734 static const struct file_operations i915_cur_wm_latency_fops = {
4735 	.owner = THIS_MODULE,
4736 	.open = cur_wm_latency_open,
4737 	.read = seq_read,
4738 	.llseek = seq_lseek,
4739 	.release = single_release,
4740 	.write = cur_wm_latency_write
4741 };
4742 
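/*
 * i915_wedged: reading reports whether the GPU is terminally wedged, writing
 * injects a manual GPU hang via i915_handle_error().
 */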
4743 static int
4744 i915_wedged_get(void *data, u64 *val)
4745 {
4746 	struct drm_device *dev = data;
4747 	struct drm_i915_private *dev_priv = dev->dev_private;
4748 
4749 	*val = i915_terminally_wedged(&dev_priv->gpu_error);
4750 
4751 	return 0;
4752 }
4753 
4754 static int
4755 i915_wedged_set(void *data, u64 val)
4756 {
4757 	struct drm_device *dev = data;
4758 	struct drm_i915_private *dev_priv = dev->dev_private;
4759 
4760 	/*
4761 	 * There is no safeguard against this debugfs entry racing with the
4762 	 * hangcheck calling the same i915_handle_error() in parallel, which
4763 	 * could cause an explosion. For now we assume that the test harness
4764 	 * is responsible enough not to inject GPU hangs while it is writing
4765 	 * to 'i915_wedged'.
4766 	 */
4767 
4768 	if (i915_reset_in_progress(&dev_priv->gpu_error))
4769 		return -EAGAIN;
4770 
4771 	intel_runtime_pm_get(dev_priv);
4772 
4773 	i915_handle_error(dev, val,
4774 			  "Manually setting wedged to %llu", val);
4775 
4776 	intel_runtime_pm_put(dev_priv);
4777 
4778 	return 0;
4779 }
4780 
4781 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4782 			i915_wedged_get, i915_wedged_set,
4783 			"%llu\n");
4784 
4785 static int
4786 i915_ring_stop_get(void *data, u64 *val)
4787 {
4788 	struct drm_device *dev = data;
4789 	struct drm_i915_private *dev_priv = dev->dev_private;
4790 
4791 	*val = dev_priv->gpu_error.stop_rings;
4792 
4793 	return 0;
4794 }
4795 
4796 static int
4797 i915_ring_stop_set(void *data, u64 val)
4798 {
4799 	struct drm_device *dev = data;
4800 	struct drm_i915_private *dev_priv = dev->dev_private;
4801 	int ret;
4802 
4803 	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
4804 
4805 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4806 	if (ret)
4807 		return ret;
4808 
4809 	dev_priv->gpu_error.stop_rings = val;
4810 	mutex_unlock(&dev->struct_mutex);
4811 
4812 	return 0;
4813 }
4814 
4815 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
4816 			i915_ring_stop_get, i915_ring_stop_set,
4817 			"0x%08llx\n");
4818 
4819 static int
4820 i915_ring_missed_irq_get(void *data, u64 *val)
4821 {
4822 	struct drm_device *dev = data;
4823 	struct drm_i915_private *dev_priv = dev->dev_private;
4824 
4825 	*val = dev_priv->gpu_error.missed_irq_rings;
4826 	return 0;
4827 }
4828 
4829 static int
4830 i915_ring_missed_irq_set(void *data, u64 val)
4831 {
4832 	struct drm_device *dev = data;
4833 	struct drm_i915_private *dev_priv = dev->dev_private;
4834 	int ret;
4835 
4836 	/* Lock against concurrent debugfs callers */
4837 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4838 	if (ret)
4839 		return ret;
4840 	dev_priv->gpu_error.missed_irq_rings = val;
4841 	mutex_unlock(&dev->struct_mutex);
4842 
4843 	return 0;
4844 }
4845 
4846 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4847 			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4848 			"0x%08llx\n");
4849 
4850 static int
4851 i915_ring_test_irq_get(void *data, u64 *val)
4852 {
4853 	struct drm_device *dev = data;
4854 	struct drm_i915_private *dev_priv = dev->dev_private;
4855 
4856 	*val = dev_priv->gpu_error.test_irq_rings;
4857 
4858 	return 0;
4859 }
4860 
4861 static int
4862 i915_ring_test_irq_set(void *data, u64 val)
4863 {
4864 	struct drm_device *dev = data;
4865 	struct drm_i915_private *dev_priv = dev->dev_private;
4866 	int ret;
4867 
4868 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4869 
4870 	/* Lock against concurrent debugfs callers */
4871 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4872 	if (ret)
4873 		return ret;
4874 
4875 	dev_priv->gpu_error.test_irq_rings = val;
4876 	mutex_unlock(&dev->struct_mutex);
4877 
4878 	return 0;
4879 }
4880 
4881 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4882 			i915_ring_test_irq_get, i915_ring_test_irq_set,
4883 			"0x%08llx\n");
4884 
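/* Flags accepted by the drop-caches debugfs interface (i915_drop_caches_set()). */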
4885 #define DROP_UNBOUND 0x1
4886 #define DROP_BOUND 0x2
4887 #define DROP_RETIRE 0x4
4888 #define DROP_ACTIVE 0x8
4889 #define DROP_ALL (DROP_UNBOUND | \
4890 		  DROP_BOUND | \
4891 		  DROP_RETIRE | \
4892 		  DROP_ACTIVE)
4893 static int
4894 i915_drop_caches_get(void *data, u64 *val)
4895 {
4896 	*val = DROP_ALL;
4897 
4898 	return 0;
4899 }
4900 
4901 static int
4902 i915_drop_caches_set(void *data, u64 val)
4903 {
4904 	struct drm_device *dev = data;
4905 	struct drm_i915_private *dev_priv = dev->dev_private;
4906 	int ret;
4907 
4908 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
4909 
4910 	/* No need to check and wait for gpu resets; only libdrm auto-restarts
4911 	 * ioctls on -EAGAIN. */
4912 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4913 	if (ret)
4914 		return ret;
4915 
4916 	if (val & DROP_ACTIVE) {
4917 		ret = i915_gpu_idle(dev);
4918 		if (ret)
4919 			goto unlock;
4920 	}
4921 
4922 	if (val & (DROP_RETIRE | DROP_ACTIVE))
4923 		i915_gem_retire_requests(dev);
4924 
4925 	if (val & DROP_BOUND)
4926 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
4927 
4928 	if (val & DROP_UNBOUND)
4929 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
4930 
4931 unlock:
4932 	mutex_unlock(&dev->struct_mutex);
4933 
4934 	return ret;
4935 }
4936 
4937 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4938 			i915_drop_caches_get, i915_drop_caches_set,
4939 			"0x%08llx\n");
4940 
4941 static int
4942 i915_max_freq_get(void *data, u64 *val)
4943 {
4944 	struct drm_device *dev = data;
4945 	struct drm_i915_private *dev_priv = dev->dev_private;
4946 	int ret;
4947 
4948 	if (INTEL_INFO(dev)->gen < 6)
4949 		return -ENODEV;
4950 
4951 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4952 
4953 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4954 	if (ret)
4955 		return ret;
4956 
4957 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
4958 	mutex_unlock(&dev_priv->rps.hw_lock);
4959 
4960 	return 0;
4961 }
4962 
4963 static int
4964 i915_max_freq_set(void *data, u64 val)
4965 {
4966 	struct drm_device *dev = data;
4967 	struct drm_i915_private *dev_priv = dev->dev_private;
4968 	u32 hw_max, hw_min;
4969 	int ret;
4970 
4971 	if (INTEL_INFO(dev)->gen < 6)
4972 		return -ENODEV;
4973 
4974 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4975 
4976 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
4977 
4978 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4979 	if (ret)
4980 		return ret;
4981 
4982 	/*
4983 	 * Turbo will still be enabled, but won't go above the set value.
4984 	 */
4985 	val = intel_freq_opcode(dev_priv, val);
4986 
4987 	hw_max = dev_priv->rps.max_freq;
4988 	hw_min = dev_priv->rps.min_freq;
4989 
4990 	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
4991 		mutex_unlock(&dev_priv->rps.hw_lock);
4992 		return -EINVAL;
4993 	}
4994 
4995 	dev_priv->rps.max_freq_softlimit = val;
4996 
4997 	intel_set_rps(dev, val);
4998 
4999 	mutex_unlock(&dev_priv->rps.hw_lock);
5000 
5001 	return 0;
5002 }
5003 
5004 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
5005 			i915_max_freq_get, i915_max_freq_set,
5006 			"%llu\n");
5007 
5008 static int
5009 i915_min_freq_get(void *data, u64 *val)
5010 {
5011 	struct drm_device *dev = data;
5012 	struct drm_i915_private *dev_priv = dev->dev_private;
5013 	int ret;
5014 
5015 	if (INTEL_INFO(dev)->gen < 6)
5016 		return -ENODEV;
5017 
5018 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
5019 
5020 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
5021 	if (ret)
5022 		return ret;
5023 
5024 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
5025 	mutex_unlock(&dev_priv->rps.hw_lock);
5026 
5027 	return 0;
5028 }
5029 
5030 static int
5031 i915_min_freq_set(void *data, u64 val)
5032 {
5033 	struct drm_device *dev = data;
5034 	struct drm_i915_private *dev_priv = dev->dev_private;
5035 	u32 hw_max, hw_min;
5036 	int ret;
5037 
5038 	if (INTEL_INFO(dev)->gen < 6)
5039 		return -ENODEV;
5040 
5041 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
5042 
5043 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
5044 
5045 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
5046 	if (ret)
5047 		return ret;
5048 
5049 	/*
5050 	 * Turbo will still be enabled, but won't go below the set value.
5051 	 */
5052 	val = intel_freq_opcode(dev_priv, val);
5053 
5054 	hw_max = dev_priv->rps.max_freq;
5055 	hw_min = dev_priv->rps.min_freq;
5056 
5057 	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
5058 		mutex_unlock(&dev_priv->rps.hw_lock);
5059 		return -EINVAL;
5060 	}
5061 
5062 	dev_priv->rps.min_freq_softlimit = val;
5063 
5064 	intel_set_rps(dev, val);
5065 
5066 	mutex_unlock(&dev_priv->rps.hw_lock);
5067 
5068 	return 0;
5069 }
5070 
5071 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
5072 			i915_min_freq_get, i915_min_freq_set,
5073 			"%llu\n");
5074 
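/*
 * i915_cache_sharing: exposes the GEN6_MBCUNIT_SNPCR cache sharing policy
 * field (values 0-3) on gen6/gen7.
 */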
5075 static int
5076 i915_cache_sharing_get(void *data, u64 *val)
5077 {
5078 	struct drm_device *dev = data;
5079 	struct drm_i915_private *dev_priv = dev->dev_private;
5080 	u32 snpcr;
5081 	int ret;
5082 
5083 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
5084 		return -ENODEV;
5085 
5086 	ret = mutex_lock_interruptible(&dev->struct_mutex);
5087 	if (ret)
5088 		return ret;
5089 	intel_runtime_pm_get(dev_priv);
5090 
5091 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5092 
5093 	intel_runtime_pm_put(dev_priv);
5094 	mutex_unlock(&dev->struct_mutex);
5095 
5096 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
5097 
5098 	return 0;
5099 }
5100 
5101 static int
5102 i915_cache_sharing_set(void *data, u64 val)
5103 {
5104 	struct drm_device *dev = data;
5105 	struct drm_i915_private *dev_priv = dev->dev_private;
5106 	u32 snpcr;
5107 
5108 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
5109 		return -ENODEV;
5110 
5111 	if (val > 3)
5112 		return -EINVAL;
5113 
5114 	intel_runtime_pm_get(dev_priv);
5115 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
5116 
5117 	/* Update the cache sharing policy here as well */
5118 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5119 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
5120 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
5121 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5122 
5123 	intel_runtime_pm_put(dev_priv);
5124 	return 0;
5125 }
5126 
5127 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
5128 			i915_cache_sharing_get, i915_cache_sharing_set,
5129 			"%llu\n");
5130 
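/*
 * Runtime slice/subslice/EU state, filled in by the per-platform
 * *_sseu_device_status() helpers below.
 */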
5131 struct sseu_dev_status {
5132 	unsigned int slice_total;
5133 	unsigned int subslice_total;
5134 	unsigned int subslice_per_slice;
5135 	unsigned int eu_total;
5136 	unsigned int eu_per_subslice;
5137 };
5138 
5139 static void cherryview_sseu_device_status(struct drm_device *dev,
5140 					  struct sseu_dev_status *stat)
5141 {
5142 	struct drm_i915_private *dev_priv = dev->dev_private;
5143 	int ss_max = 2;
5144 	int ss;
5145 	u32 sig1[ss_max], sig2[ss_max];
5146 
5147 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
5148 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
5149 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
5150 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
5151 
5152 	for (ss = 0; ss < ss_max; ss++) {
5153 		unsigned int eu_cnt;
5154 
5155 		if (sig1[ss] & CHV_SS_PG_ENABLE)
5156 			/* skip disabled subslice */
5157 			continue;
5158 
5159 		stat->slice_total = 1;
5160 		stat->subslice_per_slice++;
5161 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
5162 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
5163 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
5164 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
5165 		stat->eu_total += eu_cnt;
5166 		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
5167 	}
5168 	stat->subslice_total = stat->subslice_per_slice;
5169 }
5170 
5171 static void gen9_sseu_device_status(struct drm_device *dev,
5172 				    struct sseu_dev_status *stat)
5173 {
5174 	struct drm_i915_private *dev_priv = dev->dev_private;
5175 	int s_max = 3, ss_max = 4;
5176 	int s, ss;
5177 	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
5178 
5179 	/* BXT has a single slice and at most 3 subslices. */
5180 	if (IS_BROXTON(dev)) {
5181 		s_max = 1;
5182 		ss_max = 3;
5183 	}
5184 
5185 	for (s = 0; s < s_max; s++) {
5186 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
5187 		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
5188 		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
5189 	}
5190 
5191 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
5192 		     GEN9_PGCTL_SSA_EU19_ACK |
5193 		     GEN9_PGCTL_SSA_EU210_ACK |
5194 		     GEN9_PGCTL_SSA_EU311_ACK;
5195 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
5196 		     GEN9_PGCTL_SSB_EU19_ACK |
5197 		     GEN9_PGCTL_SSB_EU210_ACK |
5198 		     GEN9_PGCTL_SSB_EU311_ACK;
5199 
5200 	for (s = 0; s < s_max; s++) {
5201 		unsigned int ss_cnt = 0;
5202 
5203 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
5204 			/* skip disabled slice */
5205 			continue;
5206 
5207 		stat->slice_total++;
5208 
5209 		if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
5210 			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
5211 
5212 		for (ss = 0; ss < ss_max; ss++) {
5213 			unsigned int eu_cnt;
5214 
5215 			if (IS_BROXTON(dev) &&
5216 			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
5217 				/* skip disabled subslice */
5218 				continue;
5219 
5220 			if (IS_BROXTON(dev))
5221 				ss_cnt++;
5222 
5223 			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
5224 					       eu_mask[ss%2]);
5225 			stat->eu_total += eu_cnt;
5226 			stat->eu_per_subslice = max(stat->eu_per_subslice,
5227 						    eu_cnt);
5228 		}
5229 
5230 		stat->subslice_total += ss_cnt;
5231 		stat->subslice_per_slice = max(stat->subslice_per_slice,
5232 					       ss_cnt);
5233 	}
5234 }
5235 
5236 static void broadwell_sseu_device_status(struct drm_device *dev,
5237 					 struct sseu_dev_status *stat)
5238 {
5239 	struct drm_i915_private *dev_priv = dev->dev_private;
5240 	int s;
5241 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
5242 
5243 	stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);
5244 
5245 	if (stat->slice_total) {
5246 		stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
5247 		stat->subslice_total = stat->slice_total *
5248 				       stat->subslice_per_slice;
5249 		stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
5250 		stat->eu_total = stat->eu_per_subslice * stat->subslice_total;
5251 
5252 		/* subtract fused off EU(s) from enabled slice(s) */
5253 		for (s = 0; s < stat->slice_total; s++) {
5254 			u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];
5255 
5256 			stat->eu_total -= hweight8(subslice_7eu);
5257 		}
5258 	}
5259 }
5260 
5261 static int i915_sseu_status(struct seq_file *m, void *unused)
5262 {
5263 	struct drm_info_node *node = (struct drm_info_node *) m->private;
5264 	struct drm_device *dev = node->minor->dev;
5265 	struct sseu_dev_status stat;
5266 
5267 	if (INTEL_INFO(dev)->gen < 8)
5268 		return -ENODEV;
5269 
5270 	seq_puts(m, "SSEU Device Info\n");
5271 	seq_printf(m, "  Available Slice Total: %u\n",
5272 		   INTEL_INFO(dev)->slice_total);
5273 	seq_printf(m, "  Available Subslice Total: %u\n",
5274 		   INTEL_INFO(dev)->subslice_total);
5275 	seq_printf(m, "  Available Subslice Per Slice: %u\n",
5276 		   INTEL_INFO(dev)->subslice_per_slice);
5277 	seq_printf(m, "  Available EU Total: %u\n",
5278 		   INTEL_INFO(dev)->eu_total);
5279 	seq_printf(m, "  Available EU Per Subslice: %u\n",
5280 		   INTEL_INFO(dev)->eu_per_subslice);
5281 	seq_printf(m, "  Has Slice Power Gating: %s\n",
5282 		   yesno(INTEL_INFO(dev)->has_slice_pg));
5283 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
5284 		   yesno(INTEL_INFO(dev)->has_subslice_pg));
5285 	seq_printf(m, "  Has EU Power Gating: %s\n",
5286 		   yesno(INTEL_INFO(dev)->has_eu_pg));
5287 
5288 	seq_puts(m, "SSEU Device Status\n");
5289 	memset(&stat, 0, sizeof(stat));
5290 	if (IS_CHERRYVIEW(dev)) {
5291 		cherryview_sseu_device_status(dev, &stat);
5292 	} else if (IS_BROADWELL(dev)) {
5293 		broadwell_sseu_device_status(dev, &stat);
5294 	} else if (INTEL_INFO(dev)->gen >= 9) {
5295 		gen9_sseu_device_status(dev, &stat);
5296 	}
5297 	seq_printf(m, "  Enabled Slice Total: %u\n",
5298 		   stat.slice_total);
5299 	seq_printf(m, "  Enabled Subslice Total: %u\n",
5300 		   stat.subslice_total);
5301 	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
5302 		   stat.subslice_per_slice);
5303 	seq_printf(m, "  Enabled EU Total: %u\n",
5304 		   stat.eu_total);
5305 	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
5306 		   stat.eu_per_subslice);
5307 
5308 	return 0;
5309 }
5310 
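/*
 * i915_forcewake_user: keeping this file open holds a forcewake reference on
 * all domains (plus a runtime PM reference), keeping the GT registers awake
 * for manual poking. A no-op before gen6.
 */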
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6)
		return 0;

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file("i915_forcewake_user",
				  S_IRUSR,
				  root, dev,
				  &i915_forcewake_fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
}

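/*
 * Create a debugfs file with dedicated file_operations (world-readable,
 * root-writable) and register it on the minor's node list, keyed by its
 * fops, so that i915_debugfs_cleanup() can find and remove it again.
 */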
static int i915_debugfs_create(struct dentry *root,
			       struct drm_minor *minor,
			       const char *name,
			       const struct file_operations *fops)
{
	struct drm_device *dev = minor->dev;
	struct dentry *ent;

	ent = debugfs_create_file(name,
				  S_IRUGO | S_IWUSR,
				  root, dev,
				  fops);
	if (!ent)
		return -ENOMEM;

	return drm_add_fake_info_node(minor, ent, fops);
}

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
	{"i915_gem_stolen", i915_gem_stolen_list_info, 0},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_dump_lrc", i915_dump_lrc, 0},
	{"i915_execlists", i915_execlists, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_ppgtt_info", i915_ppgtt_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_semaphore_status", i915_semaphore_status, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

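/*
 * Unlike the read-only seq_file entries in i915_debugfs_list[] above,
 * these files carry their own file_operations and are created writable
 * (see i915_debugfs_create()), so they can be used to poke the driver
 * as well as to query it.
 */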
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_max_freq", &i915_max_freq_fops},
	{"i915_min_freq", &i915_min_freq_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_stop", &i915_ring_stop_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	{"i915_error_state", &i915_error_state_fops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_fc_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops}
};

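/*
 * Initialise the per-pipe software CRC state (lock, wait queue, opened
 * flag) that the pipe CRC debugfs files created in i915_debugfs_init()
 * rely on.
 */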
void intel_display_crc_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];

		pipe_crc->opened = false;
		spin_lock_init(&pipe_crc->lock);
		init_waitqueue_head(&pipe_crc->wq);
	}
}

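/*
 * Register all i915 debugfs files on the given DRM minor: the forcewake
 * user file, the pipe CRC files from i915_pipe_crc_data[], the writable
 * files from i915_debugfs_files[] and finally the read-only info nodes
 * from i915_debugfs_list[].  Any failure is returned to the caller.
 */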
int i915_debugfs_init(struct drm_minor *minor)
{
	int ret, i;

	ret = i915_forcewake_create(minor->debugfs_root, minor);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		ret = i915_debugfs_create(minor->debugfs_root, minor,
					  i915_debugfs_files[i].name,
					  i915_debugfs_files[i].fops);
		if (ret)
			return ret;
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

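/*
 * Undo i915_debugfs_init(): every file registered above is removed again,
 * matched either by its info list entry or, for the files backed by
 * dedicated fops, by the file_operations pointer it was keyed with.
 */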
void i915_debugfs_cleanup(struct drm_minor *minor)
{
	int i;

	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);

	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
				 1, minor);

	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *)&i915_pipe_crc_data[i];

		drm_debugfs_remove_files(info_list, 1, minor);
	}

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		struct drm_info_list *info_list =
			(struct drm_info_list *) i915_debugfs_files[i].fops;

		drm_debugfs_remove_files(info_list, 1, minor);
	}
}

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset; defaults to 1 byte if this is also unset. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

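/*
 * Dump the DPCD blocks listed in i915_dpcd_debug[] for a connected DP/eDP
 * connector.  Blocks marked .edp are skipped on non-eDP connectors; the
 * length of each block is taken from .end (inclusive) if set, else from
 * .size, else a single byte, and the data is read over AUX and hex-dumped.
 */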
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	uint8_t buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now: skip any block larger than our stack buffer */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err <= 0) {
			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
				  size, b->offset, err);
			continue;
		}

		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
	}

	return 0;
}

static int i915_dpcd_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_dpcd_show, inode->i_private);
}

static const struct file_operations i915_dpcd_fops = {
	.owner = THIS_MODULE,
	.open = i915_dpcd_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, or a negative error code on failure.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
				    &i915_dpcd_fops);

	return 0;
}