xref: /openbmc/linux/drivers/gpu/drm/i915/i915_debugfs.c (revision ea21feb37e753213a093e1f77b2c05ce57997ccd)
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/list_sort.h>
#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

/* As the drm_debugfs_init() routines are called before dev->dev_private is
 * allocated, we need to hook into the minor for release. */
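/*
 * A minimal usage sketch (hypothetical caller, assuming the standard
 * debugfs API): create the dentry first, then register it with the
 * minor so that the debugfs cleanup path can find and release it:
 *
 *	ent = debugfs_create_file("i915_example", S_IRUSR, root,
 *				  dev, &i915_example_fops);
 *	if (!ent)
 *		return -ENOMEM;
 *	return drm_add_fake_info_node(minor, ent, &i915_example_fops);
 */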
static int
drm_add_fake_info_node(struct drm_minor *minor,
		       struct dentry *ent,
		       const void *key)
{
	struct drm_info_node *node;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL) {
		debugfs_remove(ent);
		return -ENOMEM;
	}

	node->minor = minor;
	node->dent = ent;
	node->info_ent = (void *) key;

	mutex_lock(&minor->debugfs_lock);
	list_add(&node->list, &minor->debugfs_list);
	mutex_unlock(&minor->debugfs_lock);

	return 0;
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
#define SEP_SEMICOLON ;
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
#undef PRINT_FLAG
#undef SEP_SEMICOLON

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
}

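/*
 * Print a one-line summary of a GEM object: kernel pointer,
 * pin/tiling/global-binding flags, size, GEM read/write domains, the
 * seqnos of the last read/write/fence requests, cache level, followed
 * by any per-VMA binding, stolen-memory, mappable and frontbuffer
 * details. Most of the object list files below build on this helper.
 */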
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %x %x %x%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   i915_gem_request_get_seqno(obj->last_read_req),
		   i915_gem_request_get_seqno(obj->last_write_req),
		   i915_gem_request_get_seqno(obj->last_fenced_req),
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (vma->pin_count > 0)
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_display)
		seq_puts(m, " (display)");
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_is_ggtt(vma->vm))
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
		seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
			   vma->node.start, vma->node.size,
			   vma->ggtt_view.type);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->last_read_req != NULL)
		seq_printf(m, " (%s)",
			   i915_gem_request_get_ring(obj->last_read_req)->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}

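/*
 * Two-character context summary: 'I'/'i' for whether the legacy HW
 * context has been initialized, 'R'/'r' for whether any L3 slice
 * remap is still pending.
 */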
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
	seq_putc(m, ' ');
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct i915_vma *vma;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	/* FIXME: the user of this interface might want more than just GGTT */
	switch (list) {
	case ACTIVE_LIST:
		seq_puts(m, "Active:\n");
		head = &vm->active_list;
		break;
	case INACTIVE_LIST:
		seq_puts(m, "Inactive:\n");
		head = &vm->inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(vma, head, mm_list) {
		seq_puts(m, "   ");
		describe_obj(m, vma->obj);
		seq_putc(m, '\n');
		total_obj_size += vma->obj->base.size;
		total_gtt_size += vma->node.size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

static int obj_rank_by_stolen(void *priv,
			      struct list_head *A, struct list_head *B)
{
	struct drm_i915_gem_object *a =
		container_of(A, struct drm_i915_gem_object, obj_exec_link);
	struct drm_i915_gem_object *b =
		container_of(B, struct drm_i915_gem_object, obj_exec_link);

	return a->stolen->start - b->stolen->start;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	LIST_HEAD(stolen);
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		if (obj->stolen == NULL)
			continue;

		list_add(&obj->obj_exec_link, &stolen);

		total_obj_size += obj->base.size;
		count++;
	}
	list_sort(NULL, &stolen, obj_rank_by_stolen);
	seq_puts(m, "Stolen:\n");
	while (!list_empty(&stolen)) {
		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		list_del_init(&obj->obj_exec_link);
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

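/*
 * count_objects() and count_vmas() (further below) accumulate totals
 * into the caller's local size/count/mappable_size/mappable_count
 * variables; they are macros precisely so that they can update those
 * locals in place.
 */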
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += i915_gem_obj_ggtt_size(obj); \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(obj); \
			++mappable_count; \
		} \
	} \
} while (0)

struct file_stats {
	struct drm_i915_file_private *file_priv;
	int count;
	size_t total, unbound;
	size_t global, shared;
	size_t active, inactive;
};

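/*
 * idr_for_each() callback: accumulate per-client object statistics.
 * With full PPGTT, only VMAs bound into this client's address space
 * (or into the global GTT) count towards active/inactive/global;
 * objects bound nowhere are accounted as unbound.
 */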
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	stats->count++;
	stats->total += obj->base.size;

	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	if (USES_FULL_PPGTT(obj->base.dev)) {
		list_for_each_entry(vma, &obj->vma_list, vma_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_is_ggtt(vma->vm)) {
				stats->global += obj->base.size;
				continue;
			}

			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
			if (ppgtt->file_priv != stats->file_priv)
				continue;

			if (obj->active) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;

			return 0;
		}
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
			if (obj->active)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
			return 0;
		}
	}

	if (!list_empty(&obj->global_list))
		stats->unbound += obj->base.size;

	return 0;
}

#define print_file_stats(m, name, stats) \
	seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
		   name, \
		   stats.count, \
		   stats.total, \
		   stats.active, \
		   stats.inactive, \
		   stats.global, \
		   stats.shared, \
		   stats.unbound)

static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct file_stats stats;

	memset(&stats, 0, sizeof(stats));

	list_for_each_entry(obj,
			    &dev_priv->mm.batch_pool.cache_list,
			    batch_pool_list)
		per_file_stats(0, obj, &stats);

	print_file_stats(m, "batch pool", stats);
}

#define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
		++count; \
		if (vma->obj->map_and_fenceable) { \
			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm = &dev_priv->gtt.base;
	struct drm_file *file;
	struct i915_vma *vma;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, global_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_vmas(&vm->inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (obj->fault_mappable) {
			size += i915_gem_obj_ggtt_size(obj);
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += i915_gem_obj_ggtt_size(obj);
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.base.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);

	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
			continue;

		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_ggtt_size(obj);
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

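/*
 * Report the state of any pending page flip on each CRTC: which ring
 * the flip was queued on, the seqnos involved, the vblank counts and
 * the currently latched scanout address.
 */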
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_intel_crtc(dev, crtc) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irq(&dev->event_lock);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			u32 addr;

			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->flip_queued_req) {
				struct intel_engine_cs *ring =
					i915_gem_request_get_ring(work->flip_queued_req);

				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
					   ring->name,
					   i915_gem_request_get_seqno(work->flip_queued_req),
					   dev_priv->next_seqno,
					   ring->get_seqno(ring, true),
					   i915_gem_request_completed(work->flip_queued_req, true));
			} else {
				seq_puts(m, "Flip not associated with any ring\n");
			}
			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
				   work->flip_queued_vblank,
				   work->flip_ready_vblank,
				   drm_crtc_vblank_count(&crtc->base));
			if (work->enable_stall_check)
				seq_puts(m, "Stall check enabled, ");
			else
				seq_puts(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (INTEL_INFO(dev)->gen >= 4)
				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
			else
				addr = I915_READ(DSPADDR(crtc->plane));
			seq_printf(m, "Current scanout address 0x%08x\n", addr);

			if (work->pending_flip_obj) {
				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
			}
		}
		spin_unlock_irq(&dev->event_lock);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int count = 0;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_puts(m, "cache:\n");
	list_for_each_entry(obj,
			    &dev_priv->mm.batch_pool.cache_list,
			    batch_pool_list) {
		seq_puts(m, "   ");
		describe_obj(m, obj);
		seq_putc(m, '\n');
		count++;
	}

	seq_printf(m, "total: %d\n", count);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %x @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_puts(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_engine_cs *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %x\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}


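/*
 * Dump the interrupt registers. The register layout differs wildly
 * between platforms, hence the per-platform branches below:
 * Cherryview, gen8+, Valleyview, pre-PCH-split (gen2-4) and
 * Ironlake-style PCH-split hardware each get their own dump.
 */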
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	if (IS_CHERRYVIEW(dev)) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_INFO(dev)->gen >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		for_each_pipe(dev_priv, pipe) {
			if (!intel_display_power_is_enabled(dev_priv,
						POWER_DOMAIN_PIPE(pipe))) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}
			seq_printf(m, "Pipe %c IMR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
			seq_printf(m, "Pipe %c IIR:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
			seq_printf(m, "Pipe %c IER:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
		}

		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IMR));
		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IIR));
		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_PORT_IER));

		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IMR));
		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IIR));
		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
			   I915_READ(GEN8_DE_MISC_IER));

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 6) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_puts(m, "unused");
		else
			describe_obj(m, obj);
		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

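/*
 * i915_error_state: reading returns the last captured GPU error state
 * (if any); writing anything to the file clears it.
 */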
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_error_state_file_priv *error_priv = filp->private_data;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	struct i915_error_state_file_priv *error_priv;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	i915_error_state_get(dev, error_priv);

	file->private_data = error_priv;

	return 0;
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;

	i915_error_state_put(error_priv);
	kfree(error_priv);

	return 0;
}

static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
				     size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	loff_t tmp_pos = 0;
	ssize_t ret_count = 0;
	int ret;

	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
					    error_str.buf,
					    error_str.bytes);

	if (ret_count < 0)
		ret = ret_count;
	else
		*pos = error_str.start + ret_count;
out:
	i915_error_state_buf_release(&error_str);
	return ret ?: ret_count;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = i915_error_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

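/*
 * i915_next_seqno: reads report the next seqno that will be assigned;
 * writes force the global seqno counter via i915_gem_set_seqno(),
 * mainly useful for provoking seqno wraparound in testing.
 */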
static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	*val = dev_priv->next_seqno;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			i915_next_seqno_get, i915_next_seqno_set,
			"0x%llx\n");

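/*
 * A hypothetical sketch of how such an attribute gets exposed (the
 * actual registration is done by the driver's debugfs init code, not
 * shown in this excerpt):
 *
 *	debugfs_create_file("i915_next_seqno", S_IRUSR | S_IWUSR,
 *			    minor->debugfs_root, dev,
 *			    &i915_next_seqno_fops);
 */

/*
 * Report the RPS/turbo frequency state. Ironlake reads MEMSWCTL and
 * MEMSTAT, gen6+ decodes GEN6_RPSTAT1 and friends (with forcewake
 * held), and Valleyview queries the punit over sideband.
 */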
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
		   IS_BROADWELL(dev) || IS_GEN9(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			goto out;

		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (IS_GEN9(dev)) {
			reqf >>= 23;
		} else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_GEN9(dev))
			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf = intel_gpu_freq(dev_priv, cagf);

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		} else {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
			pm_mask = I915_READ(GEN6_PMINTRMSK);
		}
		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = rp_state_cap & 0xff;
		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
	} else if (IS_VALLEYVIEW(dev)) {
		u32 freq_sts;

		mutex_lock(&dev_priv->rps.hw_lock);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		seq_puts(m, "no P-state info available\n");
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

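/*
 * Summarize the hangcheck state of each ring: last sampled seqno and
 * ACTHD versus the current values, the accumulated hangcheck score
 * and action, and when the next hangcheck timer will fire.
 */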
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u64 acthd[I915_NUM_RINGS];
	u32 seqno[I915_NUM_RINGS];
	int i;

	if (!i915.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	for_each_ring(ring, dev_priv, i) {
		seqno[i] = ring->get_seqno(ring, false);
		acthd[i] = intel_ring_get_active_head(ring);
	}

	intel_runtime_pm_put(dev_priv);

	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
		seq_printf(m, "Hangcheck active, fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	} else {
		seq_puts(m, "Hangcheck inactive\n");
	}

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s:\n", ring->name);
		seq_printf(m, "\tseqno = %x [current %x]\n",
			   ring->hangcheck.seqno, seqno[i]);
		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)ring->hangcheck.acthd,
			   (long long)acthd[i]);
		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
			   (long long)ring->hangcheck.max_acthd);
		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
	}

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_uncore_forcewake_domain *fw_domain;
	int i;

	spin_lock_irq(&dev_priv->uncore.lock);
	for_each_fw_domain(fw_domain, dev_priv, i) {
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(i),
			   fw_domain->wake_count);
	}
	spin_unlock_irq(&dev_priv->uncore.lock);

	return 0;
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, rcctl1, pw_status;

	intel_runtime_pm_get(dev_priv);

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "Turbo enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	seq_printf(m, "Render RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_RENDER_RC6));
	seq_printf(m, "Media RC6 residency since boot: %u\n",
		   I915_READ(VLV_GT_MEDIA_RC6));

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->uncore.lock);
	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (forcewake_count) {
		seq_puts(m, "RC information inaccurate because somebody "
			    "holds a forcewake reference\n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}

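/*
 * Top-level DRPC (render C-state) file: pick the platform-specific
 * report above based on the running hardware.
 */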
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_VALLEYVIEW(dev))
		return vlv_drpc_info(m);
	else if (INTEL_INFO(dev)->gen >= 6)
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_FBC(dev)) {
		seq_puts(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	if (intel_fbc_enabled(dev)) {
		seq_puts(m, "FBC enabled\n");
	} else {
		seq_puts(m, "FBC disabled: ");
		switch (dev_priv->fbc.no_fbc_reason) {
		case FBC_OK:
			seq_puts(m, "FBC activated, but currently disabled in hardware");
			break;
		case FBC_UNSUPPORTED:
			seq_puts(m, "unsupported by this chipset");
			break;
		case FBC_NO_OUTPUT:
			seq_puts(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_puts(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_puts(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_puts(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_puts(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_puts(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_puts(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_puts(m, "disabled per module param (default off)");
			break;
		case FBC_CHIP_DEFAULT:
			seq_puts(m, "disabled per chip default");
			break;
		default:
			seq_puts(m, "unknown reason");
		}
		seq_putc(m, '\n');
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

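/*
 * FBC false-color control (gen7+): writing a non-zero value sets
 * FBC_CTL_FALSE_COLOR in ILK_DPFC_CONTROL, which (as a debug aid)
 * makes compressed framebuffer segments visually distinct on screen.
 */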
static int i915_fbc_fc_get(void *data, u64 *val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	drm_modeset_lock_all(dev);
	*val = dev_priv->fbc.false_color;
	drm_modeset_unlock_all(dev);

	return 0;
}

static int i915_fbc_fc_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
		return -ENODEV;

	drm_modeset_lock_all(dev);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	drm_modeset_unlock_all(dev);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
			i915_fbc_fc_get, i915_fbc_fc_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_IPS(dev)) {
		seq_puts(m, "not supported\n");
		return 0;
	}

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915.enable_ips));

	if (INTEL_INFO(dev)->gen >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(dev_priv);

	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
		 IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

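/*
 * Print the GPU/ring frequency table: for each GPU frequency step the
 * pcode reports the effective CPU and ring frequencies, encoded in
 * units of 100 MHz in bits [7:0] and [15:8] of the reply.
 */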
1716 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1717 {
1718 	struct drm_info_node *node = m->private;
1719 	struct drm_device *dev = node->minor->dev;
1720 	struct drm_i915_private *dev_priv = dev->dev_private;
1721 	int ret = 0;
1722 	int gpu_freq, ia_freq;
1723 
1724 	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1725 		seq_puts(m, "unsupported on this chipset\n");
1726 		return 0;
1727 	}
1728 
1729 	intel_runtime_pm_get(dev_priv);
1730 
1731 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1732 
1733 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1734 	if (ret)
1735 		goto out;
1736 
1737 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1738 
1739 	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
1740 	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
1741 	     gpu_freq++) {
1742 		ia_freq = gpu_freq;
1743 		sandybridge_pcode_read(dev_priv,
1744 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1745 				       &ia_freq);
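		/*
		 * For the GPU frequency passed in, the pcode mailbox replies
		 * with a packed word: bits 7:0 hold the effective CPU
		 * frequency and bits 15:8 the effective ring frequency, both
		 * in 100 MHz units.  E.g. a reply of 0x1210 decodes to
		 * 1600 MHz CPU / 1800 MHz ring, matching the seq_printf()
		 * below.
		 */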
1746 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1747 			   intel_gpu_freq(dev_priv, gpu_freq),
1748 			   ((ia_freq >> 0) & 0xff) * 100,
1749 			   ((ia_freq >> 8) & 0xff) * 100);
1750 	}
1751 
1752 	mutex_unlock(&dev_priv->rps.hw_lock);
1753 
1754 out:
1755 	intel_runtime_pm_put(dev_priv);
1756 	return ret;
1757 }
1758 
1759 static int i915_opregion(struct seq_file *m, void *unused)
1760 {
1761 	struct drm_info_node *node = m->private;
1762 	struct drm_device *dev = node->minor->dev;
1763 	struct drm_i915_private *dev_priv = dev->dev_private;
1764 	struct intel_opregion *opregion = &dev_priv->opregion;
1765 	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1766 	int ret;
1767 
1768 	if (data == NULL)
1769 		return -ENOMEM;
1770 
1771 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1772 	if (ret)
1773 		goto out;
1774 
1775 	if (opregion->header) {
1776 		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1777 		seq_write(m, data, OPREGION_SIZE);
1778 	}
1779 
1780 	mutex_unlock(&dev->struct_mutex);
1781 
1782 out:
1783 	kfree(data);
1784 	return ret;
1785 }
1786 
1787 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1788 {
1789 	struct drm_info_node *node = m->private;
1790 	struct drm_device *dev = node->minor->dev;
1791 	struct intel_fbdev *ifbdev = NULL;
1792 	struct intel_framebuffer *fb;
1793 
1794 #ifdef CONFIG_DRM_I915_FBDEV
1795 	struct drm_i915_private *dev_priv = dev->dev_private;
1796 
1797 	ifbdev = dev_priv->fbdev;
1798 	fb = to_intel_framebuffer(ifbdev->helper.fb);
1799 
1800 	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1801 		   fb->base.width,
1802 		   fb->base.height,
1803 		   fb->base.depth,
1804 		   fb->base.bits_per_pixel,
1805 		   fb->base.modifier[0],
1806 		   atomic_read(&fb->base.refcount.refcount));
1807 	describe_obj(m, fb->obj);
1808 	seq_putc(m, '\n');
1809 #endif
1810 
1811 	mutex_lock(&dev->mode_config.fb_lock);
1812 	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1813 		if (ifbdev && &fb->base == ifbdev->helper.fb)
1814 			continue;
1815 
1816 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1817 			   fb->base.width,
1818 			   fb->base.height,
1819 			   fb->base.depth,
1820 			   fb->base.bits_per_pixel,
1821 			   fb->base.modifier[0],
1822 			   atomic_read(&fb->base.refcount.refcount));
1823 		describe_obj(m, fb->obj);
1824 		seq_putc(m, '\n');
1825 	}
1826 	mutex_unlock(&dev->mode_config.fb_lock);
1827 
1828 	return 0;
1829 }
1830 
1831 static void describe_ctx_ringbuf(struct seq_file *m,
1832 				 struct intel_ringbuffer *ringbuf)
1833 {
1834 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
1835 		   ringbuf->space, ringbuf->head, ringbuf->tail,
1836 		   ringbuf->last_retired_head);
1837 }
1838 
1839 static int i915_context_status(struct seq_file *m, void *unused)
1840 {
1841 	struct drm_info_node *node = m->private;
1842 	struct drm_device *dev = node->minor->dev;
1843 	struct drm_i915_private *dev_priv = dev->dev_private;
1844 	struct intel_engine_cs *ring;
1845 	struct intel_context *ctx;
1846 	int ret, i;
1847 
1848 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1849 	if (ret)
1850 		return ret;
1851 
1852 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
1853 		if (!i915.enable_execlists &&
1854 		    ctx->legacy_hw_ctx.rcs_state == NULL)
1855 			continue;
1856 
1857 		seq_puts(m, "HW context ");
1858 		describe_ctx(m, ctx);
1859 		for_each_ring(ring, dev_priv, i) {
1860 			if (ring->default_context == ctx)
1861 				seq_printf(m, "(default context %s) ",
1862 					   ring->name);
1863 		}
1864 
1865 		if (i915.enable_execlists) {
1866 			seq_putc(m, '\n');
1867 			for_each_ring(ring, dev_priv, i) {
1868 				struct drm_i915_gem_object *ctx_obj =
1869 					ctx->engine[i].state;
1870 				struct intel_ringbuffer *ringbuf =
1871 					ctx->engine[i].ringbuf;
1872 
1873 				seq_printf(m, "%s: ", ring->name);
1874 				if (ctx_obj)
1875 					describe_obj(m, ctx_obj);
1876 				if (ringbuf)
1877 					describe_ctx_ringbuf(m, ringbuf);
1878 				seq_putc(m, '\n');
1879 			}
1880 		} else {
1881 			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1882 		}
1883 
1884 		seq_putc(m, '\n');
1885 	}
1886 
1887 	mutex_unlock(&dev->struct_mutex);
1888 
1889 	return 0;
1890 }
1891 
1892 static void i915_dump_lrc_obj(struct seq_file *m,
1893 			      struct intel_engine_cs *ring,
1894 			      struct drm_i915_gem_object *ctx_obj)
1895 {
1896 	struct page *page;
1897 	uint32_t *reg_state;
1898 	int j;
1899 	unsigned long ggtt_offset = 0;
1900 
1901 	if (ctx_obj == NULL) {
1902 		seq_printf(m, "Context on %s with no gem object\n",
1903 			   ring->name);
1904 		return;
1905 	}
1906 
1907 	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
1908 		   intel_execlists_ctx_id(ctx_obj));
1909 
1910 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
1911 		seq_puts(m, "\tNot bound in GGTT\n");
1912 	else
1913 		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
1914 
1915 	if (i915_gem_object_get_pages(ctx_obj)) {
1916 		seq_puts(m, "\tFailed to get pages for context object\n");
1917 		return;
1918 	}
1919 
1920 	page = i915_gem_object_get_page(ctx_obj, 1);
1921 	if (!WARN_ON(page == NULL)) {
1922 		reg_state = kmap_atomic(page);
1923 
1924 		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
1925 			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
1926 				   ggtt_offset + 4096 + (j * 4),
1927 				   reg_state[j], reg_state[j + 1],
1928 				   reg_state[j + 2], reg_state[j + 3]);
1929 		}
1930 		kunmap_atomic(reg_state);
1931 	}
1932 
1933 	seq_putc(m, '\n');
1934 }
1935 
1936 static int i915_dump_lrc(struct seq_file *m, void *unused)
1937 {
1938 	struct drm_info_node *node = m->private;
1939 	struct drm_device *dev = node->minor->dev;
1940 	struct drm_i915_private *dev_priv = dev->dev_private;
1941 	struct intel_engine_cs *ring;
1942 	struct intel_context *ctx;
1943 	int ret, i;
1944 
1945 	if (!i915.enable_execlists) {
1946 		seq_puts(m, "Logical Ring Contexts are disabled\n");
1947 		return 0;
1948 	}
1949 
1950 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1951 	if (ret)
1952 		return ret;
1953 
1954 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
1955 		for_each_ring(ring, dev_priv, i) {
1956 			if (ring->default_context != ctx)
1957 				i915_dump_lrc_obj(m, ring,
1958 						  ctx->engine[i].state);
1959 		}
1960 	}
1961 
1962 	mutex_unlock(&dev->struct_mutex);
1963 
1964 	return 0;
1965 }
1966 
1967 static int i915_execlists(struct seq_file *m, void *data)
1968 {
1969 	struct drm_info_node *node = m->private;
1970 	struct drm_device *dev = node->minor->dev;
1971 	struct drm_i915_private *dev_priv = dev->dev_private;
1972 	struct intel_engine_cs *ring;
1973 	u32 status_pointer;
1974 	u8 read_pointer;
1975 	u8 write_pointer;
1976 	u32 status;
1977 	u32 ctx_id;
1978 	struct list_head *cursor;
1979 	int ring_id, i;
1980 	int ret;
1981 
1982 	if (!i915.enable_execlists) {
1983 		seq_puts(m, "Logical Ring Contexts are disabled\n");
1984 		return 0;
1985 	}
1986 
1987 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1988 	if (ret)
1989 		return ret;
1990 
1991 	intel_runtime_pm_get(dev_priv);
1992 
1993 	for_each_ring(ring, dev_priv, ring_id) {
1994 		struct drm_i915_gem_request *head_req = NULL;
1995 		int count = 0;
1996 		unsigned long flags;
1997 
1998 		seq_printf(m, "%s\n", ring->name);
1999 
2000 		status = I915_READ(RING_EXECLIST_STATUS(ring));
2001 		ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
2002 		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
2003 			   status, ctx_id);
2004 
2005 		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
2006 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
2007 
2008 		read_pointer = ring->next_context_status_buffer;
2009 		write_pointer = status_pointer & 0x07;
2010 		if (read_pointer > write_pointer)
2011 			write_pointer += 6;
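		/*
		 * The context status buffer is a 6-entry ring, so if the
		 * driver's read pointer is ahead of the hardware write
		 * pointer the latter has wrapped; unwrap it by one full ring
		 * purely for display.  E.g. read 5 / write 2 is shown as
		 * read 5 / write 8, i.e. three status events outstanding.
		 */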
2012 		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
2013 			   read_pointer, write_pointer);
2014 
2015 		for (i = 0; i < 6; i++) {
2016 			status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
2017 			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
2018 
2019 			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
2020 				   i, status, ctx_id);
2021 		}
2022 
2023 		spin_lock_irqsave(&ring->execlist_lock, flags);
2024 		list_for_each(cursor, &ring->execlist_queue)
2025 			count++;
2026 		head_req = list_first_entry_or_null(&ring->execlist_queue,
2027 				struct drm_i915_gem_request, execlist_link);
2028 		spin_unlock_irqrestore(&ring->execlist_lock, flags);
2029 
2030 		seq_printf(m, "\t%d requests in queue\n", count);
2031 		if (head_req) {
2032 			struct drm_i915_gem_object *ctx_obj;
2033 
2034 			ctx_obj = head_req->ctx->engine[ring_id].state;
2035 			seq_printf(m, "\tHead request id: %u\n",
2036 				   intel_execlists_ctx_id(ctx_obj));
2037 			seq_printf(m, "\tHead request tail: %u\n",
2038 				   head_req->tail);
2039 		}
2040 
2041 		seq_putc(m, '\n');
2042 	}
2043 
2044 	intel_runtime_pm_put(dev_priv);
2045 	mutex_unlock(&dev->struct_mutex);
2046 
2047 	return 0;
2048 }
2049 
2050 static const char *swizzle_string(unsigned swizzle)
2051 {
2052 	switch (swizzle) {
2053 	case I915_BIT_6_SWIZZLE_NONE:
2054 		return "none";
2055 	case I915_BIT_6_SWIZZLE_9:
2056 		return "bit9";
2057 	case I915_BIT_6_SWIZZLE_9_10:
2058 		return "bit9/bit10";
2059 	case I915_BIT_6_SWIZZLE_9_11:
2060 		return "bit9/bit11";
2061 	case I915_BIT_6_SWIZZLE_9_10_11:
2062 		return "bit9/bit10/bit11";
2063 	case I915_BIT_6_SWIZZLE_9_17:
2064 		return "bit9/bit17";
2065 	case I915_BIT_6_SWIZZLE_9_10_17:
2066 		return "bit9/bit10/bit17";
2067 	case I915_BIT_6_SWIZZLE_UNKNOWN:
2068 		return "unknown";
2069 	}
2070 
2071 	return "bug";
2072 }
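/*
 * Background sketch: on machines with certain DRAM configurations the
 * memory controller XORs physical address bit 6 with one or more higher
 * address bits when accessing tiled surfaces, to spread accesses across
 * channels and banks.  The strings above name the bits feeding that XOR,
 * so that software detiling through the CPU can apply the same swizzle,
 * e.g. for "bit9/bit10": swizzled_bit6 = bit6 ^ bit9 ^ bit10.
 */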
2073 
2074 static int i915_swizzle_info(struct seq_file *m, void *data)
2075 {
2076 	struct drm_info_node *node = m->private;
2077 	struct drm_device *dev = node->minor->dev;
2078 	struct drm_i915_private *dev_priv = dev->dev_private;
2079 	int ret;
2080 
2081 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2082 	if (ret)
2083 		return ret;
2084 	intel_runtime_pm_get(dev_priv);
2085 
2086 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2087 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2088 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2089 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2090 
2091 	if (IS_GEN3(dev) || IS_GEN4(dev)) {
2092 		seq_printf(m, "DCC = 0x%08x\n",
2093 			   I915_READ(DCC));
2094 		seq_printf(m, "DCC2 = 0x%08x\n",
2095 			   I915_READ(DCC2));
2096 		seq_printf(m, "C0DRB3 = 0x%04x\n",
2097 			   I915_READ16(C0DRB3));
2098 		seq_printf(m, "C1DRB3 = 0x%04x\n",
2099 			   I915_READ16(C1DRB3));
2100 	} else if (INTEL_INFO(dev)->gen >= 6) {
2101 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2102 			   I915_READ(MAD_DIMM_C0));
2103 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2104 			   I915_READ(MAD_DIMM_C1));
2105 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2106 			   I915_READ(MAD_DIMM_C2));
2107 		seq_printf(m, "TILECTL = 0x%08x\n",
2108 			   I915_READ(TILECTL));
2109 		if (INTEL_INFO(dev)->gen >= 8)
2110 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2111 				   I915_READ(GAMTARBMODE));
2112 		else
2113 			seq_printf(m, "ARB_MODE = 0x%08x\n",
2114 				   I915_READ(ARB_MODE));
2115 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2116 			   I915_READ(DISP_ARB_CTL));
2117 	}
2118 
2119 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2120 		seq_puts(m, "L-shaped memory detected\n");
2121 
2122 	intel_runtime_pm_put(dev_priv);
2123 	mutex_unlock(&dev->struct_mutex);
2124 
2125 	return 0;
2126 }
2127 
2128 static int per_file_ctx(int id, void *ptr, void *data)
2129 {
2130 	struct intel_context *ctx = ptr;
2131 	struct seq_file *m = data;
2132 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2133 
2134 	if (!ppgtt) {
2135 		seq_printf(m, "  no ppgtt for context %d\n",
2136 			   ctx->user_handle);
2137 		return 0;
2138 	}
2139 
2140 	if (i915_gem_context_is_default(ctx))
2141 		seq_puts(m, "  default context:\n");
2142 	else
2143 		seq_printf(m, "  context %d:\n", ctx->user_handle);
2144 	ppgtt->debug_dump(ppgtt, m);
2145 
2146 	return 0;
2147 }
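/*
 * per_file_ctx() is the callback handed to idr_for_each() below: it runs
 * once per context handle registered in a file's context_idr, and
 * returning 0 tells the IDR iterator to continue (a non-zero return
 * would abort the walk early).
 */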
2148 
2149 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2150 {
2151 	struct drm_i915_private *dev_priv = dev->dev_private;
2152 	struct intel_engine_cs *ring;
2153 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2154 	int unused, i;
2155 
2156 	if (!ppgtt)
2157 		return;
2158 
2159 	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
2160 	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
2161 	for_each_ring(ring, dev_priv, unused) {
2162 		seq_printf(m, "%s\n", ring->name);
2163 		for (i = 0; i < 4; i++) {
2164 			u32 offset = 0x270 + i * 8;
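			/*
			 * Each PDP entry is a 64-bit register split across
			 * two 32-bit MMIO words (upper dword at offset + 4,
			 * lower dword at offset); read both halves and
			 * stitch them together before printing.
			 */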
2165 			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
2166 			pdp <<= 32;
2167 			pdp |= I915_READ(ring->mmio_base + offset);
2168 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2169 		}
2170 	}
2171 }
2172 
2173 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2174 {
2175 	struct drm_i915_private *dev_priv = dev->dev_private;
2176 	struct intel_engine_cs *ring;
2177 	struct drm_file *file;
2178 	int i;
2179 
2180 	if (INTEL_INFO(dev)->gen == 6)
2181 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2182 
2183 	for_each_ring(ring, dev_priv, i) {
2184 		seq_printf(m, "%s\n", ring->name);
2185 		if (INTEL_INFO(dev)->gen == 7)
2186 			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
2187 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
2188 		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
2189 		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
2190 	}
2191 	if (dev_priv->mm.aliasing_ppgtt) {
2192 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2193 
2194 		seq_puts(m, "aliasing PPGTT:\n");
2195 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
2196 
2197 		ppgtt->debug_dump(ppgtt, m);
2198 	}
2199 
2200 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2201 		struct drm_i915_file_private *file_priv = file->driver_priv;
2202 
2203 		seq_printf(m, "proc: %s\n",
2204 			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
2205 		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
2206 	}
2207 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2208 }
2209 
2210 static int i915_ppgtt_info(struct seq_file *m, void *data)
2211 {
2212 	struct drm_info_node *node = m->private;
2213 	struct drm_device *dev = node->minor->dev;
2214 	struct drm_i915_private *dev_priv = dev->dev_private;
2215 
2216 	int ret = mutex_lock_interruptible(&dev->struct_mutex);
2217 	if (ret)
2218 		return ret;
2219 	intel_runtime_pm_get(dev_priv);
2220 
2221 	if (INTEL_INFO(dev)->gen >= 8)
2222 		gen8_ppgtt_info(m, dev);
2223 	else if (INTEL_INFO(dev)->gen >= 6)
2224 		gen6_ppgtt_info(m, dev);
2225 
2226 	intel_runtime_pm_put(dev_priv);
2227 	mutex_unlock(&dev->struct_mutex);
2228 
2229 	return 0;
2230 }
2231 
2232 static int i915_llc(struct seq_file *m, void *data)
2233 {
2234 	struct drm_info_node *node = m->private;
2235 	struct drm_device *dev = node->minor->dev;
2236 	struct drm_i915_private *dev_priv = dev->dev_private;
2237 
2238 	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
2239 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
2240 	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
2241 
2242 	return 0;
2243 }
2244 
2245 static int i915_edp_psr_status(struct seq_file *m, void *data)
2246 {
2247 	struct drm_info_node *node = m->private;
2248 	struct drm_device *dev = node->minor->dev;
2249 	struct drm_i915_private *dev_priv = dev->dev_private;
2250 	u32 psrperf = 0;
2251 	u32 stat[3];
2252 	enum pipe pipe;
2253 	bool enabled = false;
2254 
2255 	if (!HAS_PSR(dev)) {
2256 		seq_puts(m, "PSR not supported\n");
2257 		return 0;
2258 	}
2259 
2260 	intel_runtime_pm_get(dev_priv);
2261 
2262 	mutex_lock(&dev_priv->psr.lock);
2263 	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2264 	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2265 	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2266 	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2267 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2268 		   dev_priv->psr.busy_frontbuffer_bits);
2269 	seq_printf(m, "Re-enable work scheduled: %s\n",
2270 		   yesno(work_busy(&dev_priv->psr.work.work)));
2271 
2272 	if (HAS_DDI(dev))
2273 		enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
2274 	else {
2275 		for_each_pipe(dev_priv, pipe) {
2276 			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2277 				VLV_EDP_PSR_CURR_STATE_MASK;
2278 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2279 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2280 				enabled = true;
2281 		}
2282 	}
2283 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2284 
2285 	if (!HAS_DDI(dev))
2286 		for_each_pipe(dev_priv, pipe) {
2287 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2288 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2289 				seq_printf(m, " pipe %c", pipe_name(pipe));
2290 		}
2291 	seq_puts(m, "\n");
2292 
2293 	seq_printf(m, "Link standby: %s\n",
2294 		   yesno((bool)dev_priv->psr.link_standby));
2295 
2296 	/* CHV PSR has no kind of performance counter */
2297 	if (HAS_DDI(dev)) {
2298 		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
2299 			EDP_PSR_PERF_CNT_MASK;
2300 
2301 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2302 	}
2303 	mutex_unlock(&dev_priv->psr.lock);
2304 
2305 	intel_runtime_pm_put(dev_priv);
2306 	return 0;
2307 }
2308 
2309 static int i915_sink_crc(struct seq_file *m, void *data)
2310 {
2311 	struct drm_info_node *node = m->private;
2312 	struct drm_device *dev = node->minor->dev;
2313 	struct intel_encoder *encoder;
2314 	struct intel_connector *connector;
2315 	struct intel_dp *intel_dp = NULL;
2316 	int ret;
2317 	u8 crc[6];
2318 
2319 	drm_modeset_lock_all(dev);
2320 	for_each_intel_connector(dev, connector) {
2321 
2322 		if (connector->base.dpms != DRM_MODE_DPMS_ON)
2323 			continue;
2324 
2325 		if (!connector->base.encoder)
2326 			continue;
2327 
2328 		encoder = to_intel_encoder(connector->base.encoder);
2329 		if (encoder->type != INTEL_OUTPUT_EDP)
2330 			continue;
2331 
2332 		intel_dp = enc_to_intel_dp(&encoder->base);
2333 
2334 		ret = intel_dp_sink_crc(intel_dp, crc);
2335 		if (ret)
2336 			goto out;
2337 
2338 		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2339 			   crc[0], crc[1], crc[2],
2340 			   crc[3], crc[4], crc[5]);
2341 		goto out;
2342 	}
2343 	ret = -ENODEV;
2344 out:
2345 	drm_modeset_unlock_all(dev);
2346 	return ret;
2347 }
2348 
2349 static int i915_energy_uJ(struct seq_file *m, void *data)
2350 {
2351 	struct drm_info_node *node = m->private;
2352 	struct drm_device *dev = node->minor->dev;
2353 	struct drm_i915_private *dev_priv = dev->dev_private;
2354 	u64 power;
2355 	u32 units;
2356 
2357 	if (INTEL_INFO(dev)->gen < 6)
2358 		return -ENODEV;
2359 
2360 	intel_runtime_pm_get(dev_priv);
2361 
2362 	rdmsrl(MSR_RAPL_POWER_UNIT, power);
2363 	power = (power & 0x1f00) >> 8;
2364 	units = 1000000 / (1 << power); /* convert to uJ */
2365 	power = I915_READ(MCH_SECP_NRG_STTS);
2366 	power *= units;
2367 
2368 	intel_runtime_pm_put(dev_priv);
2369 
2370 	seq_printf(m, "%llu\n", (unsigned long long)power);
2371 
2372 	return 0;
2373 }
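/*
 * Worked example for the arithmetic above: bits 12:8 of
 * MSR_RAPL_POWER_UNIT hold the energy status unit (ESU), i.e. one counter
 * tick is 2^-ESU joules.  With ESU = 16, a common value on these parts,
 * units = 1000000 / (1 << 16) = 15 uJ per tick (truncated), so an
 * MCH_SECP_NRG_STTS reading of 1000 ticks is reported as 15000 uJ.
 */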
2374 
2375 static int i915_pc8_status(struct seq_file *m, void *unused)
2376 {
2377 	struct drm_info_node *node = m->private;
2378 	struct drm_device *dev = node->minor->dev;
2379 	struct drm_i915_private *dev_priv = dev->dev_private;
2380 
2381 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2382 		seq_puts(m, "not supported\n");
2383 		return 0;
2384 	}
2385 
2386 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
2387 	seq_printf(m, "IRQs disabled: %s\n",
2388 		   yesno(!intel_irqs_enabled(dev_priv)));
2389 
2390 	return 0;
2391 }
2392 
2393 static const char *power_domain_str(enum intel_display_power_domain domain)
2394 {
2395 	switch (domain) {
2396 	case POWER_DOMAIN_PIPE_A:
2397 		return "PIPE_A";
2398 	case POWER_DOMAIN_PIPE_B:
2399 		return "PIPE_B";
2400 	case POWER_DOMAIN_PIPE_C:
2401 		return "PIPE_C";
2402 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
2403 		return "PIPE_A_PANEL_FITTER";
2404 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
2405 		return "PIPE_B_PANEL_FITTER";
2406 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
2407 		return "PIPE_C_PANEL_FITTER";
2408 	case POWER_DOMAIN_TRANSCODER_A:
2409 		return "TRANSCODER_A";
2410 	case POWER_DOMAIN_TRANSCODER_B:
2411 		return "TRANSCODER_B";
2412 	case POWER_DOMAIN_TRANSCODER_C:
2413 		return "TRANSCODER_C";
2414 	case POWER_DOMAIN_TRANSCODER_EDP:
2415 		return "TRANSCODER_EDP";
2416 	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
2417 		return "PORT_DDI_A_2_LANES";
2418 	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
2419 		return "PORT_DDI_A_4_LANES";
2420 	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
2421 		return "PORT_DDI_B_2_LANES";
2422 	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
2423 		return "PORT_DDI_B_4_LANES";
2424 	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
2425 		return "PORT_DDI_C_2_LANES";
2426 	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
2427 		return "PORT_DDI_C_4_LANES";
2428 	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
2429 		return "PORT_DDI_D_2_LANES";
2430 	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
2431 		return "PORT_DDI_D_4_LANES";
2432 	case POWER_DOMAIN_PORT_DSI:
2433 		return "PORT_DSI";
2434 	case POWER_DOMAIN_PORT_CRT:
2435 		return "PORT_CRT";
2436 	case POWER_DOMAIN_PORT_OTHER:
2437 		return "PORT_OTHER";
2438 	case POWER_DOMAIN_VGA:
2439 		return "VGA";
2440 	case POWER_DOMAIN_AUDIO:
2441 		return "AUDIO";
2442 	case POWER_DOMAIN_PLLS:
2443 		return "PLLS";
2444 	case POWER_DOMAIN_AUX_A:
2445 		return "AUX_A";
2446 	case POWER_DOMAIN_AUX_B:
2447 		return "AUX_B";
2448 	case POWER_DOMAIN_AUX_C:
2449 		return "AUX_C";
2450 	case POWER_DOMAIN_AUX_D:
2451 		return "AUX_D";
2452 	case POWER_DOMAIN_INIT:
2453 		return "INIT";
2454 	default:
2455 		MISSING_CASE(domain);
2456 		return "?";
2457 	}
2458 }
2459 
2460 static int i915_power_domain_info(struct seq_file *m, void *unused)
2461 {
2462 	struct drm_info_node *node = m->private;
2463 	struct drm_device *dev = node->minor->dev;
2464 	struct drm_i915_private *dev_priv = dev->dev_private;
2465 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2466 	int i;
2467 
2468 	mutex_lock(&power_domains->lock);
2469 
2470 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2471 	for (i = 0; i < power_domains->power_well_count; i++) {
2472 		struct i915_power_well *power_well;
2473 		enum intel_display_power_domain power_domain;
2474 
2475 		power_well = &power_domains->power_wells[i];
2476 		seq_printf(m, "%-25s %d\n", power_well->name,
2477 			   power_well->count);
2478 
2479 		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2480 		     power_domain++) {
2481 			if (!(BIT(power_domain) & power_well->domains))
2482 				continue;
2483 
2484 			seq_printf(m, "  %-23s %d\n",
2485 				 power_domain_str(power_domain),
2486 				 power_domains->domain_use_count[power_domain]);
2487 		}
2488 	}
2489 
2490 	mutex_unlock(&power_domains->lock);
2491 
2492 	return 0;
2493 }
2494 
2495 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2496 				 struct drm_display_mode *mode)
2497 {
2498 	int i;
2499 
2500 	for (i = 0; i < tabs; i++)
2501 		seq_putc(m, '\t');
2502 
2503 	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2504 		   mode->base.id, mode->name,
2505 		   mode->vrefresh, mode->clock,
2506 		   mode->hdisplay, mode->hsync_start,
2507 		   mode->hsync_end, mode->htotal,
2508 		   mode->vdisplay, mode->vsync_start,
2509 		   mode->vsync_end, mode->vtotal,
2510 		   mode->type, mode->flags);
2511 }
2512 
2513 static void intel_encoder_info(struct seq_file *m,
2514 			       struct intel_crtc *intel_crtc,
2515 			       struct intel_encoder *intel_encoder)
2516 {
2517 	struct drm_info_node *node = m->private;
2518 	struct drm_device *dev = node->minor->dev;
2519 	struct drm_crtc *crtc = &intel_crtc->base;
2520 	struct intel_connector *intel_connector;
2521 	struct drm_encoder *encoder;
2522 
2523 	encoder = &intel_encoder->base;
2524 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2525 		   encoder->base.id, encoder->name);
2526 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2527 		struct drm_connector *connector = &intel_connector->base;
2528 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2529 			   connector->base.id,
2530 			   connector->name,
2531 			   drm_get_connector_status_name(connector->status));
2532 		if (connector->status == connector_status_connected) {
2533 			struct drm_display_mode *mode = &crtc->mode;
2534 			seq_puts(m, ", mode:\n");
2535 			intel_seq_print_mode(m, 2, mode);
2536 		} else {
2537 			seq_putc(m, '\n');
2538 		}
2539 	}
2540 }
2541 
2542 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2543 {
2544 	struct drm_info_node *node = m->private;
2545 	struct drm_device *dev = node->minor->dev;
2546 	struct drm_crtc *crtc = &intel_crtc->base;
2547 	struct intel_encoder *intel_encoder;
2548 
2549 	if (crtc->primary->fb)
2550 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2551 			   crtc->primary->fb->base.id, crtc->x, crtc->y,
2552 			   crtc->primary->fb->width, crtc->primary->fb->height);
2553 	else
2554 		seq_puts(m, "\tprimary plane disabled\n");
2555 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2556 		intel_encoder_info(m, intel_crtc, intel_encoder);
2557 }
2558 
2559 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2560 {
2561 	struct drm_display_mode *mode = panel->fixed_mode;
2562 
2563 	seq_puts(m, "\tfixed mode:\n");
2564 	intel_seq_print_mode(m, 2, mode);
2565 }
2566 
2567 static void intel_dp_info(struct seq_file *m,
2568 			  struct intel_connector *intel_connector)
2569 {
2570 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2571 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2572 
2573 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2574 	seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
2575 		   "no");
2576 	if (intel_encoder->type == INTEL_OUTPUT_EDP)
2577 		intel_panel_info(m, &intel_connector->panel);
2578 }
2579 
2580 static void intel_hdmi_info(struct seq_file *m,
2581 			    struct intel_connector *intel_connector)
2582 {
2583 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2584 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2585 
2586 	seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
2587 		   "no");
2588 }
2589 
2590 static void intel_lvds_info(struct seq_file *m,
2591 			    struct intel_connector *intel_connector)
2592 {
2593 	intel_panel_info(m, &intel_connector->panel);
2594 }
2595 
2596 static void intel_connector_info(struct seq_file *m,
2597 				 struct drm_connector *connector)
2598 {
2599 	struct intel_connector *intel_connector = to_intel_connector(connector);
2600 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2601 	struct drm_display_mode *mode;
2602 
2603 	seq_printf(m, "connector %d: type %s, status: %s\n",
2604 		   connector->base.id, connector->name,
2605 		   drm_get_connector_status_name(connector->status));
2606 	if (connector->status == connector_status_connected) {
2607 		seq_printf(m, "\tname: %s\n", connector->display_info.name);
2608 		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2609 			   connector->display_info.width_mm,
2610 			   connector->display_info.height_mm);
2611 		seq_printf(m, "\tsubpixel order: %s\n",
2612 			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2613 		seq_printf(m, "\tCEA rev: %d\n",
2614 			   connector->display_info.cea_rev);
2615 	}
2616 	if (intel_encoder) {
2617 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2618 		    intel_encoder->type == INTEL_OUTPUT_EDP)
2619 			intel_dp_info(m, intel_connector);
2620 		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2621 			intel_hdmi_info(m, intel_connector);
2622 		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2623 			intel_lvds_info(m, intel_connector);
2624 	}
2625 
2626 	seq_puts(m, "\tmodes:\n");
2627 	list_for_each_entry(mode, &connector->modes, head)
2628 		intel_seq_print_mode(m, 2, mode);
2629 }
2630 
2631 static bool cursor_active(struct drm_device *dev, int pipe)
2632 {
2633 	struct drm_i915_private *dev_priv = dev->dev_private;
2634 	u32 state;
2635 
2636 	if (IS_845G(dev) || IS_I865G(dev))
2637 		state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
2638 	else
2639 		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2640 
2641 	return state;
2642 }
2643 
2644 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2645 {
2646 	struct drm_i915_private *dev_priv = dev->dev_private;
2647 	u32 pos;
2648 
2649 	pos = I915_READ(CURPOS(pipe));
2650 
2651 	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2652 	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2653 		*x = -*x;
2654 
2655 	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2656 	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2657 		*y = -*y;
2658 
2659 	return cursor_active(dev, pipe);
2660 }
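/*
 * Note that CURPOS encodes each coordinate as sign-magnitude rather than
 * two's complement: a magnitude field plus a separate sign bit per axis.
 * E.g. x = -5 is stored as magnitude 5 with the X sign bit set, which is
 * why the helper above negates the decoded magnitude when the sign bit
 * is present.
 */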
2661 
2662 static int i915_display_info(struct seq_file *m, void *unused)
2663 {
2664 	struct drm_info_node *node = m->private;
2665 	struct drm_device *dev = node->minor->dev;
2666 	struct drm_i915_private *dev_priv = dev->dev_private;
2667 	struct intel_crtc *crtc;
2668 	struct drm_connector *connector;
2669 
2670 	intel_runtime_pm_get(dev_priv);
2671 	drm_modeset_lock_all(dev);
2672 	seq_puts(m, "CRTC info\n");
2673 	seq_puts(m, "---------\n");
2674 	for_each_intel_crtc(dev, crtc) {
2675 		bool active;
2676 		int x, y;
2677 
2678 		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
2679 			   crtc->base.base.id, pipe_name(crtc->pipe),
2680 			   yesno(crtc->active), crtc->config->pipe_src_w,
2681 			   crtc->config->pipe_src_h);
2682 		if (crtc->active) {
2683 			intel_crtc_info(m, crtc);
2684 
2685 			active = cursor_position(dev, crtc->pipe, &x, &y);
2686 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
2687 				   yesno(crtc->cursor_base),
2688 				   x, y, crtc->base.cursor->state->crtc_w,
2689 				   crtc->base.cursor->state->crtc_h,
2690 				   crtc->cursor_addr, yesno(active));
2691 		}
2692 
2693 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
2694 			   yesno(!crtc->cpu_fifo_underrun_disabled),
2695 			   yesno(!crtc->pch_fifo_underrun_disabled));
2696 	}
2697 
2698 	seq_putc(m, '\n');
2699 	seq_puts(m, "Connector info\n");
2700 	seq_puts(m, "--------------\n");
2701 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2702 		intel_connector_info(m, connector);
2703 	}
2704 	drm_modeset_unlock_all(dev);
2705 	intel_runtime_pm_put(dev_priv);
2706 
2707 	return 0;
2708 }
2709 
2710 static int i915_semaphore_status(struct seq_file *m, void *unused)
2711 {
2712 	struct drm_info_node *node = m->private;
2713 	struct drm_device *dev = node->minor->dev;
2714 	struct drm_i915_private *dev_priv = dev->dev_private;
2715 	struct intel_engine_cs *ring;
2716 	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
2717 	int i, j, ret;
2718 
2719 	if (!i915_semaphore_is_enabled(dev)) {
2720 		seq_puts(m, "Semaphores are disabled\n");
2721 		return 0;
2722 	}
2723 
2724 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2725 	if (ret)
2726 		return ret;
2727 	intel_runtime_pm_get(dev_priv);
2728 
2729 	if (IS_BROADWELL(dev)) {
2730 		struct page *page;
2731 		uint64_t *seqno;
2732 
2733 		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
2734 
2735 		seqno = (uint64_t *)kmap_atomic(page);
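		/*
		 * The semaphore page holds a matrix of 64-bit seqnos, one
		 * slot per (ring, ring) pair.  The row-major reads below
		 * (i * I915_NUM_RINGS + j) list what ring i last signalled
		 * towards each other ring; the transposed reads
		 * (i + j * I915_NUM_RINGS) list what it last waited on.
		 */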
2736 		for_each_ring(ring, dev_priv, i) {
2737 			uint64_t offset;
2738 
2739 			seq_printf(m, "%s\n", ring->name);
2740 
2741 			seq_puts(m, "  Last signal:");
2742 			for (j = 0; j < num_rings; j++) {
2743 				offset = i * I915_NUM_RINGS + j;
2744 				seq_printf(m, "0x%08llx (0x%02llx) ",
2745 					   seqno[offset], offset * 8);
2746 			}
2747 			seq_putc(m, '\n');
2748 
2749 			seq_puts(m, "  Last wait:  ");
2750 			for (j = 0; j < num_rings; j++) {
2751 				offset = i + (j * I915_NUM_RINGS);
2752 				seq_printf(m, "0x%08llx (0x%02llx) ",
2753 					   seqno[offset], offset * 8);
2754 			}
2755 			seq_putc(m, '\n');
2756 
2757 		}
2758 		kunmap_atomic(seqno);
2759 	} else {
2760 		seq_puts(m, "  Last signal:");
2761 		for_each_ring(ring, dev_priv, i)
2762 			for (j = 0; j < num_rings; j++)
2763 				seq_printf(m, "0x%08x\n",
2764 					   I915_READ(ring->semaphore.mbox.signal[j]));
2765 		seq_putc(m, '\n');
2766 	}
2767 
2768 	seq_puts(m, "\nSync seqno:\n");
2769 	for_each_ring(ring, dev_priv, i) {
2770 		for (j = 0; j < num_rings; j++) {
2771 			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
2772 		}
2773 		seq_putc(m, '\n');
2774 	}
2775 	seq_putc(m, '\n');
2776 
2777 	intel_runtime_pm_put(dev_priv);
2778 	mutex_unlock(&dev->struct_mutex);
2779 	return 0;
2780 }
2781 
2782 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2783 {
2784 	struct drm_info_node *node = m->private;
2785 	struct drm_device *dev = node->minor->dev;
2786 	struct drm_i915_private *dev_priv = dev->dev_private;
2787 	int i;
2788 
2789 	drm_modeset_lock_all(dev);
2790 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2791 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2792 
2793 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
2794 		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
2795 			   pll->config.crtc_mask, pll->active, yesno(pll->on));
2796 		seq_printf(m, " tracked hardware state:\n");
2797 		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
2798 		seq_printf(m, " dpll_md: 0x%08x\n",
2799 			   pll->config.hw_state.dpll_md);
2800 		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
2801 		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
2802 		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
2803 	}
2804 	drm_modeset_unlock_all(dev);
2805 
2806 	return 0;
2807 }
2808 
2809 static int i915_wa_registers(struct seq_file *m, void *unused)
2810 {
2811 	int i;
2812 	int ret;
2813 	struct drm_info_node *node = m->private;
2814 	struct drm_device *dev = node->minor->dev;
2815 	struct drm_i915_private *dev_priv = dev->dev_private;
2816 
2817 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2818 	if (ret)
2819 		return ret;
2820 
2821 	intel_runtime_pm_get(dev_priv);
2822 
2823 	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
2824 	for (i = 0; i < dev_priv->workarounds.count; ++i) {
2825 		u32 addr, mask, value, read;
2826 		bool ok;
2827 
2828 		addr = dev_priv->workarounds.reg[i].addr;
2829 		mask = dev_priv->workarounds.reg[i].mask;
2830 		value = dev_priv->workarounds.reg[i].value;
2831 		read = I915_READ(addr);
2832 		ok = (value & mask) == (read & mask);
2833 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
2834 			   addr, value, mask, read, ok ? "OK" : "FAIL");
2835 	}
2836 
2837 	intel_runtime_pm_put(dev_priv);
2838 	mutex_unlock(&dev->struct_mutex);
2839 
2840 	return 0;
2841 }
2842 
2843 static int i915_ddb_info(struct seq_file *m, void *unused)
2844 {
2845 	struct drm_info_node *node = m->private;
2846 	struct drm_device *dev = node->minor->dev;
2847 	struct drm_i915_private *dev_priv = dev->dev_private;
2848 	struct skl_ddb_allocation *ddb;
2849 	struct skl_ddb_entry *entry;
2850 	enum pipe pipe;
2851 	int plane;
2852 
2853 	if (INTEL_INFO(dev)->gen < 9)
2854 		return 0;
2855 
2856 	drm_modeset_lock_all(dev);
2857 
2858 	ddb = &dev_priv->wm.skl_hw.ddb;
2859 
2860 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2861 
2862 	for_each_pipe(dev_priv, pipe) {
2863 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2864 
2865 		for_each_plane(dev_priv, pipe, plane) {
2866 			entry = &ddb->plane[pipe][plane];
2867 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
2868 				   entry->start, entry->end,
2869 				   skl_ddb_entry_size(entry));
2870 		}
2871 
2872 		entry = &ddb->cursor[pipe];
2873 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
2874 			   entry->end, skl_ddb_entry_size(entry));
2875 	}
2876 
2877 	drm_modeset_unlock_all(dev);
2878 
2879 	return 0;
2880 }
2881 
2882 static void drrs_status_per_crtc(struct seq_file *m,
2883 		struct drm_device *dev, struct intel_crtc *intel_crtc)
2884 {
2885 	struct intel_encoder *intel_encoder;
2886 	struct drm_i915_private *dev_priv = dev->dev_private;
2887 	struct i915_drrs *drrs = &dev_priv->drrs;
2888 	int vrefresh = 0;
2889 
2890 	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
2891 		/* Encoder connected on this CRTC */
2892 		switch (intel_encoder->type) {
2893 		case INTEL_OUTPUT_EDP:
2894 			seq_puts(m, "eDP:\n");
2895 			break;
2896 		case INTEL_OUTPUT_DSI:
2897 			seq_puts(m, "DSI:\n");
2898 			break;
2899 		case INTEL_OUTPUT_HDMI:
2900 			seq_puts(m, "HDMI:\n");
2901 			break;
2902 		case INTEL_OUTPUT_DISPLAYPORT:
2903 			seq_puts(m, "DP:\n");
2904 			break;
2905 		default:
2906 			seq_printf(m, "Other encoder (type=%d).\n",
2907 						intel_encoder->type);
2908 			return;
2909 		}
2910 	}
2911 
2912 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
2913 		seq_puts(m, "\tVBT: DRRS_type: Static");
2914 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
2915 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
2916 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
2917 		seq_puts(m, "\tVBT: DRRS_type: None");
2918 	else
2919 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
2920 
2921 	seq_puts(m, "\n\n");
2922 
2923 	if (intel_crtc->config->has_drrs) {
2924 		struct intel_panel *panel;
2925 
2926 		mutex_lock(&drrs->mutex);
2927 		/* DRRS Supported */
2928 		seq_puts(m, "\tDRRS Supported: Yes\n");
2929 
2930 		/* disable_drrs() will make drrs->dp NULL */
2931 		if (!drrs->dp) {
2932 			seq_puts(m, "Idleness DRRS: Disabled");
2933 			mutex_unlock(&drrs->mutex);
2934 			return;
2935 		}
2936 
2937 		panel = &drrs->dp->attached_connector->panel;
2938 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
2939 					drrs->busy_frontbuffer_bits);
2940 
2941 		seq_puts(m, "\n\t\t");
2942 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
2943 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
2944 			vrefresh = panel->fixed_mode->vrefresh;
2945 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
2946 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
2947 			vrefresh = panel->downclock_mode->vrefresh;
2948 		} else {
2949 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
2950 						drrs->refresh_rate_type);
2951 			mutex_unlock(&drrs->mutex);
2952 			return;
2953 		}
2954 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
2955 
2956 		seq_puts(m, "\n\t\t");
2957 		mutex_unlock(&drrs->mutex);
2958 	} else {
2959 		/* DRRS not supported. Print the VBT parameter */
2960 		seq_puts(m, "\tDRRS Supported: No");
2961 	}
2962 	seq_puts(m, "\n");
2963 }
2964 
2965 static int i915_drrs_status(struct seq_file *m, void *unused)
2966 {
2967 	struct drm_info_node *node = m->private;
2968 	struct drm_device *dev = node->minor->dev;
2969 	struct intel_crtc *intel_crtc;
2970 	int active_crtc_cnt = 0;
2971 
2972 	for_each_intel_crtc(dev, intel_crtc) {
2973 		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
2974 
2975 		if (intel_crtc->active) {
2976 			active_crtc_cnt++;
2977 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
2978 
2979 			drrs_status_per_crtc(m, dev, intel_crtc);
2980 		}
2981 
2982 		drm_modeset_unlock(&intel_crtc->base.mutex);
2983 	}
2984 
2985 	if (!active_crtc_cnt)
2986 		seq_puts(m, "No active crtc found\n");
2987 
2988 	return 0;
2989 }
2990 
2991 struct pipe_crc_info {
2992 	const char *name;
2993 	struct drm_device *dev;
2994 	enum pipe pipe;
2995 };
2996 
2997 static int i915_dp_mst_info(struct seq_file *m, void *unused)
2998 {
2999 	struct drm_info_node *node = m->private;
3000 	struct drm_device *dev = node->minor->dev;
3001 	struct drm_encoder *encoder;
3002 	struct intel_encoder *intel_encoder;
3003 	struct intel_digital_port *intel_dig_port;
3004 	drm_modeset_lock_all(dev);
3005 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3006 		intel_encoder = to_intel_encoder(encoder);
3007 		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
3008 			continue;
3009 		intel_dig_port = enc_to_dig_port(encoder);
3010 		if (!intel_dig_port->dp.can_mst)
3011 			continue;
3012 
3013 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3014 	}
3015 	drm_modeset_unlock_all(dev);
3016 	return 0;
3017 }
3018 
3019 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
3020 {
3021 	struct pipe_crc_info *info = inode->i_private;
3022 	struct drm_i915_private *dev_priv = info->dev->dev_private;
3023 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3024 
3025 	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
3026 		return -ENODEV;
3027 
3028 	spin_lock_irq(&pipe_crc->lock);
3029 
3030 	if (pipe_crc->opened) {
3031 		spin_unlock_irq(&pipe_crc->lock);
3032 		return -EBUSY; /* already open */
3033 	}
3034 
3035 	pipe_crc->opened = true;
3036 	filep->private_data = inode->i_private;
3037 
3038 	spin_unlock_irq(&pipe_crc->lock);
3039 
3040 	return 0;
3041 }
3042 
3043 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
3044 {
3045 	struct pipe_crc_info *info = inode->i_private;
3046 	struct drm_i915_private *dev_priv = info->dev->dev_private;
3047 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3048 
3049 	spin_lock_irq(&pipe_crc->lock);
3050 	pipe_crc->opened = false;
3051 	spin_unlock_irq(&pipe_crc->lock);
3052 
3053 	return 0;
3054 }
3055 
3056 /* (6 fields, 8 chars each, space separated (5) + '\n') */
3057 #define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
3058 /* account for the terminating '\0' */
3059 #define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
3060 
3061 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
3062 {
3063 	assert_spin_locked(&pipe_crc->lock);
3064 	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3065 			INTEL_PIPE_CRC_ENTRIES_NR);
3066 }
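/*
 * The CRC entries form a classic head/tail circular buffer: the CRC
 * interrupt code elsewhere in the driver advances head as entries arrive,
 * and the reader below advances tail.  CIRC_CNT() from <linux/circ_buf.h>
 * yields the number of filled slots and relies on
 * INTEL_PIPE_CRC_ENTRIES_NR being a power of two, so index wrap-around is
 * a cheap mask; the read loop double-checks that with
 * BUILD_BUG_ON_NOT_POWER_OF_2().
 */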
3067 
3068 static ssize_t
3069 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
3070 		   loff_t *pos)
3071 {
3072 	struct pipe_crc_info *info = filep->private_data;
3073 	struct drm_device *dev = info->dev;
3074 	struct drm_i915_private *dev_priv = dev->dev_private;
3075 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3076 	char buf[PIPE_CRC_BUFFER_LEN];
3077 	int n_entries;
3078 	ssize_t bytes_read;
3079 
3080 	/*
3081 	 * Don't allow user space to provide buffers not big enough to hold
3082 	 * a line of data.
3083 	 */
3084 	if (count < PIPE_CRC_LINE_LEN)
3085 		return -EINVAL;
3086 
3087 	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
3088 		return 0;
3089 
3090 	/* nothing to read */
3091 	spin_lock_irq(&pipe_crc->lock);
3092 	while (pipe_crc_data_count(pipe_crc) == 0) {
3093 		int ret;
3094 
3095 		if (filep->f_flags & O_NONBLOCK) {
3096 			spin_unlock_irq(&pipe_crc->lock);
3097 			return -EAGAIN;
3098 		}
3099 
3100 		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
3101 				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
3102 		if (ret) {
3103 			spin_unlock_irq(&pipe_crc->lock);
3104 			return ret;
3105 		}
3106 	}
3107 
3108 	/* We now have one or more entries to read */
3109 	n_entries = count / PIPE_CRC_LINE_LEN;
3110 
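	/*
	 * Each iteration below snapshots one entry into the stack buffer
	 * while holding the lock, then drops the lock around copy_to_user(),
	 * since copying to userspace may fault and sleep, which is not
	 * allowed under a spinlock with interrupts disabled.
	 */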
3111 	bytes_read = 0;
3112 	while (n_entries > 0) {
3113 		struct intel_pipe_crc_entry *entry =
3114 			&pipe_crc->entries[pipe_crc->tail];
3115 		int ret;
3116 
3117 		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3118 			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
3119 			break;
3120 
3121 		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
3122 		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
3123 
3124 		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
3125 				       "%8u %8x %8x %8x %8x %8x\n",
3126 				       entry->frame, entry->crc[0],
3127 				       entry->crc[1], entry->crc[2],
3128 				       entry->crc[3], entry->crc[4]);
3129 
3130 		spin_unlock_irq(&pipe_crc->lock);
3131 
3132 		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
3133 		if (ret)	/* copy_to_user() returns bytes left uncopied */
3134 			return -EFAULT;
3135 
3136 		user_buf += PIPE_CRC_LINE_LEN;
3137 		n_entries--;
3138 
3139 		spin_lock_irq(&pipe_crc->lock);
3140 	}
3141 
3142 	spin_unlock_irq(&pipe_crc->lock);
3143 
3144 	return bytes_read;
3145 }
3146 
3147 static const struct file_operations i915_pipe_crc_fops = {
3148 	.owner = THIS_MODULE,
3149 	.open = i915_pipe_crc_open,
3150 	.read = i915_pipe_crc_read,
3151 	.release = i915_pipe_crc_release,
3152 };
3153 
3154 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
3155 	{
3156 		.name = "i915_pipe_A_crc",
3157 		.pipe = PIPE_A,
3158 	},
3159 	{
3160 		.name = "i915_pipe_B_crc",
3161 		.pipe = PIPE_B,
3162 	},
3163 	{
3164 		.name = "i915_pipe_C_crc",
3165 		.pipe = PIPE_C,
3166 	},
3167 };
3168 
3169 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
3170 				enum pipe pipe)
3171 {
3172 	struct drm_device *dev = minor->dev;
3173 	struct dentry *ent;
3174 	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
3175 
3176 	info->dev = dev;
3177 	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
3178 				  &i915_pipe_crc_fops);
3179 	if (!ent)
3180 		return -ENOMEM;
3181 
3182 	return drm_add_fake_info_node(minor, ent, info);
3183 }
3184 
3185 static const char * const pipe_crc_sources[] = {
3186 	"none",
3187 	"plane1",
3188 	"plane2",
3189 	"pf",
3190 	"pipe",
3191 	"TV",
3192 	"DP-B",
3193 	"DP-C",
3194 	"DP-D",
3195 	"auto",
3196 };
3197 
3198 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
3199 {
3200 	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
3201 	return pipe_crc_sources[source];
3202 }
3203 
3204 static int display_crc_ctl_show(struct seq_file *m, void *data)
3205 {
3206 	struct drm_device *dev = m->private;
3207 	struct drm_i915_private *dev_priv = dev->dev_private;
3208 	int i;
3209 
3210 	for (i = 0; i < I915_MAX_PIPES; i++)
3211 		seq_printf(m, "%c %s\n", pipe_name(i),
3212 			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
3213 
3214 	return 0;
3215 }
3216 
3217 static int display_crc_ctl_open(struct inode *inode, struct file *file)
3218 {
3219 	struct drm_device *dev = inode->i_private;
3220 
3221 	return single_open(file, display_crc_ctl_show, dev);
3222 }
3223 
3224 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3225 				 uint32_t *val)
3226 {
3227 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3228 		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3229 
3230 	switch (*source) {
3231 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3232 		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
3233 		break;
3234 	case INTEL_PIPE_CRC_SOURCE_NONE:
3235 		*val = 0;
3236 		break;
3237 	default:
3238 		return -EINVAL;
3239 	}
3240 
3241 	return 0;
3242 }
3243 
3244 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
3245 				     enum intel_pipe_crc_source *source)
3246 {
3247 	struct intel_encoder *encoder;
3248 	struct intel_crtc *crtc;
3249 	struct intel_digital_port *dig_port;
3250 	int ret = 0;
3251 
3252 	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3253 
3254 	drm_modeset_lock_all(dev);
3255 	for_each_intel_encoder(dev, encoder) {
3256 		if (!encoder->base.crtc)
3257 			continue;
3258 
3259 		crtc = to_intel_crtc(encoder->base.crtc);
3260 
3261 		if (crtc->pipe != pipe)
3262 			continue;
3263 
3264 		switch (encoder->type) {
3265 		case INTEL_OUTPUT_TVOUT:
3266 			*source = INTEL_PIPE_CRC_SOURCE_TV;
3267 			break;
3268 		case INTEL_OUTPUT_DISPLAYPORT:
3269 		case INTEL_OUTPUT_EDP:
3270 			dig_port = enc_to_dig_port(&encoder->base);
3271 			switch (dig_port->port) {
3272 			case PORT_B:
3273 				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
3274 				break;
3275 			case PORT_C:
3276 				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
3277 				break;
3278 			case PORT_D:
3279 				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
3280 				break;
3281 			default:
3282 				WARN(1, "nonexisting DP port %c\n",
3283 				     port_name(dig_port->port));
3284 				break;
3285 			}
3286 			break;
3287 		default:
3288 			break;
3289 		}
3290 	}
3291 	drm_modeset_unlock_all(dev);
3292 
3293 	return ret;
3294 }
3295 
3296 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
3297 				enum pipe pipe,
3298 				enum intel_pipe_crc_source *source,
3299 				uint32_t *val)
3300 {
3301 	struct drm_i915_private *dev_priv = dev->dev_private;
3302 	bool need_stable_symbols = false;
3303 
3304 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3305 		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3306 		if (ret)
3307 			return ret;
3308 	}
3309 
3310 	switch (*source) {
3311 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3312 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
3313 		break;
3314 	case INTEL_PIPE_CRC_SOURCE_DP_B:
3315 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
3316 		need_stable_symbols = true;
3317 		break;
3318 	case INTEL_PIPE_CRC_SOURCE_DP_C:
3319 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
3320 		need_stable_symbols = true;
3321 		break;
3322 	case INTEL_PIPE_CRC_SOURCE_DP_D:
3323 		if (!IS_CHERRYVIEW(dev))
3324 			return -EINVAL;
3325 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
3326 		need_stable_symbols = true;
3327 		break;
3328 	case INTEL_PIPE_CRC_SOURCE_NONE:
3329 		*val = 0;
3330 		break;
3331 	default:
3332 		return -EINVAL;
3333 	}
3334 
3335 	/*
3336 	 * When the pipe CRC tap point is after the transcoders we need
3337 	 * to tweak symbol-level features to produce a deterministic series of
3338 	 * symbols for a given frame. We need to reset those features only once
3339 	 * a frame (instead of every nth symbol):
3340 	 *   - DC-balance: used to ensure a better clock recovery from the data
3341 	 *     link (SDVO)
3342 	 *   - DisplayPort scrambling: used for EMI reduction
3343 	 */
3344 	if (need_stable_symbols) {
3345 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3346 
3347 		tmp |= DC_BALANCE_RESET_VLV;
3348 		switch (pipe) {
3349 		case PIPE_A:
3350 			tmp |= PIPE_A_SCRAMBLE_RESET;
3351 			break;
3352 		case PIPE_B:
3353 			tmp |= PIPE_B_SCRAMBLE_RESET;
3354 			break;
3355 		case PIPE_C:
3356 			tmp |= PIPE_C_SCRAMBLE_RESET;
3357 			break;
3358 		default:
3359 			return -EINVAL;
3360 		}
3361 		I915_WRITE(PORT_DFT2_G4X, tmp);
3362 	}
3363 
3364 	return 0;
3365 }
3366 
3367 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
3368 				 enum pipe pipe,
3369 				 enum intel_pipe_crc_source *source,
3370 				 uint32_t *val)
3371 {
3372 	struct drm_i915_private *dev_priv = dev->dev_private;
3373 	bool need_stable_symbols = false;
3374 
3375 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3376 		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3377 		if (ret)
3378 			return ret;
3379 	}
3380 
3381 	switch (*source) {
3382 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3383 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
3384 		break;
3385 	case INTEL_PIPE_CRC_SOURCE_TV:
3386 		if (!SUPPORTS_TV(dev))
3387 			return -EINVAL;
3388 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
3389 		break;
3390 	case INTEL_PIPE_CRC_SOURCE_DP_B:
3391 		if (!IS_G4X(dev))
3392 			return -EINVAL;
3393 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
3394 		need_stable_symbols = true;
3395 		break;
3396 	case INTEL_PIPE_CRC_SOURCE_DP_C:
3397 		if (!IS_G4X(dev))
3398 			return -EINVAL;
3399 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
3400 		need_stable_symbols = true;
3401 		break;
3402 	case INTEL_PIPE_CRC_SOURCE_DP_D:
3403 		if (!IS_G4X(dev))
3404 			return -EINVAL;
3405 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
3406 		need_stable_symbols = true;
3407 		break;
3408 	case INTEL_PIPE_CRC_SOURCE_NONE:
3409 		*val = 0;
3410 		break;
3411 	default:
3412 		return -EINVAL;
3413 	}
3414 
3415 	/*
3416 	 * When the pipe CRC tap point is after the transcoders we need
3417 	 * to tweak symbol-level features to produce a deterministic series of
3418 	 * symbols for a given frame. We need to reset those features only once
3419 	 * a frame (instead of every nth symbol):
3420 	 *   - DC-balance: used to ensure a better clock recovery from the data
3421 	 *     link (SDVO)
3422 	 *   - DisplayPort scrambling: used for EMI reduction
3423 	 */
3424 	if (need_stable_symbols) {
3425 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3426 
3427 		WARN_ON(!IS_G4X(dev));
3428 
3429 		I915_WRITE(PORT_DFT_I9XX,
3430 			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
3431 
3432 		if (pipe == PIPE_A)
3433 			tmp |= PIPE_A_SCRAMBLE_RESET;
3434 		else
3435 			tmp |= PIPE_B_SCRAMBLE_RESET;
3436 
3437 		I915_WRITE(PORT_DFT2_G4X, tmp);
3438 	}
3439 
3440 	return 0;
3441 }
3442 
3443 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3444 					 enum pipe pipe)
3445 {
3446 	struct drm_i915_private *dev_priv = dev->dev_private;
3447 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3448 
3449 	switch (pipe) {
3450 	case PIPE_A:
3451 		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3452 		break;
3453 	case PIPE_B:
3454 		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3455 		break;
3456 	case PIPE_C:
3457 		tmp &= ~PIPE_C_SCRAMBLE_RESET;
3458 		break;
3459 	default:
3460 		return;
3461 	}
3462 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
3463 		tmp &= ~DC_BALANCE_RESET_VLV;
3464 	I915_WRITE(PORT_DFT2_G4X, tmp);
3466 }
3467 
3468 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
3469 					 enum pipe pipe)
3470 {
3471 	struct drm_i915_private *dev_priv = dev->dev_private;
3472 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3473 
3474 	if (pipe == PIPE_A)
3475 		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3476 	else
3477 		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3478 	I915_WRITE(PORT_DFT2_G4X, tmp);
3479 
3480 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
3481 		I915_WRITE(PORT_DFT_I9XX,
3482 			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
3483 	}
3484 }
3485 
3486 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3487 				uint32_t *val)
3488 {
3489 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3490 		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3491 
3492 	switch (*source) {
3493 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3494 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
3495 		break;
3496 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3497 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
3498 		break;
3499 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3500 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
3501 		break;
3502 	case INTEL_PIPE_CRC_SOURCE_NONE:
3503 		*val = 0;
3504 		break;
3505 	default:
3506 		return -EINVAL;
3507 	}
3508 
3509 	return 0;
3510 }
3511 
3512 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
3513 {
3514 	struct drm_i915_private *dev_priv = dev->dev_private;
3515 	struct intel_crtc *crtc =
3516 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3517 
3518 	drm_modeset_lock_all(dev);
3519 	/*
3520 	 * If we use the eDP transcoder we need to make sure that we don't
3521 	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
3522 	 * relevant on hsw with pipe A when using the always-on power well
3523 	 * routing.
3524 	 */
3525 	if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
3526 	    !crtc->config->pch_pfit.enabled) {
3527 		crtc->config->pch_pfit.force_thru = true;
3528 
3529 		intel_display_power_get(dev_priv,
3530 					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
3531 
3532 		dev_priv->display.crtc_disable(&crtc->base);
3533 		dev_priv->display.crtc_enable(&crtc->base);
3534 	}
3535 	drm_modeset_unlock_all(dev);
3536 }
3537 
3538 static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
3539 {
3540 	struct drm_i915_private *dev_priv = dev->dev_private;
3541 	struct intel_crtc *crtc =
3542 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3543 
3544 	drm_modeset_lock_all(dev);
3545 	/*
3546 	 * Undo the force-through pfit routing applied by
3547 	 * hsw_trans_edp_pipe_A_crc_wa() when the CRC source was enabled, and
3548 	 * release the panel fitter power domain reference taken there. Only
3549 	 * relevant on hsw with pipe A with always-on power well routing.
3550 	 */
3551 	if (crtc->config->pch_pfit.force_thru) {
3552 		crtc->config->pch_pfit.force_thru = false;
3553 
3554 		dev_priv->display.crtc_disable(&crtc->base);
3555 		dev_priv->display.crtc_enable(&crtc->base);
3556 
3557 		intel_display_power_put(dev_priv,
3558 					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
3559 	}
3560 	drm_modeset_unlock_all(dev);
3561 }
3562 
3563 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
3564 				enum pipe pipe,
3565 				enum intel_pipe_crc_source *source,
3566 				uint32_t *val)
3567 {
3568 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3569 		*source = INTEL_PIPE_CRC_SOURCE_PF;
3570 
3571 	switch (*source) {
3572 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3573 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
3574 		break;
3575 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3576 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
3577 		break;
3578 	case INTEL_PIPE_CRC_SOURCE_PF:
3579 		if (IS_HASWELL(dev) && pipe == PIPE_A)
3580 			hsw_trans_edp_pipe_A_crc_wa(dev);
3581 
3582 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
3583 		break;
3584 	case INTEL_PIPE_CRC_SOURCE_NONE:
3585 		*val = 0;
3586 		break;
3587 	default:
3588 		return -EINVAL;
3589 	}
3590 
3591 	return 0;
3592 }
3593 
3594 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3595 			       enum intel_pipe_crc_source source)
3596 {
3597 	struct drm_i915_private *dev_priv = dev->dev_private;
3598 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3599 	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
3600 									pipe));
3601 	u32 val = 0; /* shut up gcc */
3602 	int ret;
3603 
3604 	if (pipe_crc->source == source)
3605 		return 0;
3606 
3607 	/* forbid changing the source without going back to 'none' */
3608 	if (pipe_crc->source && source)
3609 		return -EINVAL;
3610 
3611 	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
3612 		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
3613 		return -EIO;
3614 	}
3615 
3616 	if (IS_GEN2(dev))
3617 		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
3618 	else if (INTEL_INFO(dev)->gen < 5)
3619 		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3620 	else if (IS_VALLEYVIEW(dev))
3621 		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3622 	else if (IS_GEN5(dev) || IS_GEN6(dev))
3623 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
3624 	else
3625 		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3626 
3627 	if (ret != 0)
3628 		return ret;
3629 
3630 	/* none -> real source transition */
3631 	if (source) {
3632 		struct intel_pipe_crc_entry *entries;
3633 
3634 		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
3635 				 pipe_name(pipe), pipe_crc_source_name(source));
3636 
3637 		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
3638 				  sizeof(pipe_crc->entries[0]),
3639 				  GFP_KERNEL);
3640 		if (!entries)
3641 			return -ENOMEM;
3642 
3643 		/*
3644 		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
3645 		 * enabled and disabled dynamically based on package C states,
3646 		 * user space can't make reliable use of the CRCs, so let's just
3647 		 * completely disable it.
3648 		 */
3649 		hsw_disable_ips(crtc);
3650 
3651 		spin_lock_irq(&pipe_crc->lock);
3652 		kfree(pipe_crc->entries);
3653 		pipe_crc->entries = entries;
3654 		pipe_crc->head = 0;
3655 		pipe_crc->tail = 0;
3656 		spin_unlock_irq(&pipe_crc->lock);
3657 	}
3658 
3659 	pipe_crc->source = source;
3660 
3661 	I915_WRITE(PIPE_CRC_CTL(pipe), val);
3662 	POSTING_READ(PIPE_CRC_CTL(pipe));
3663 
3664 	/* real source -> none transition */
3665 	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
3666 		struct intel_pipe_crc_entry *entries;
3667 		struct intel_crtc *crtc =
3668 			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
3669 
3670 		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
3671 				 pipe_name(pipe));
3672 
3673 		drm_modeset_lock(&crtc->base.mutex, NULL);
3674 		if (crtc->active)
3675 			intel_wait_for_vblank(dev, pipe);
3676 		drm_modeset_unlock(&crtc->base.mutex);
3677 
3678 		spin_lock_irq(&pipe_crc->lock);
3679 		entries = pipe_crc->entries;
3680 		pipe_crc->entries = NULL;
3681 		pipe_crc->head = 0;
3682 		pipe_crc->tail = 0;
3683 		spin_unlock_irq(&pipe_crc->lock);
3684 
3685 		kfree(entries);
3686 
3687 		if (IS_G4X(dev))
3688 			g4x_undo_pipe_scramble_reset(dev, pipe);
3689 		else if (IS_VALLEYVIEW(dev))
3690 			vlv_undo_pipe_scramble_reset(dev, pipe);
3691 		else if (IS_HASWELL(dev) && pipe == PIPE_A)
3692 			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
3693 
3694 		hsw_enable_ips(crtc);
3695 	}
3696 
3697 	return 0;
3698 }
3699 
3700 /*
3701  * Parse pipe CRC command strings:
3702  *   command: wsp* object wsp+ name wsp+ source wsp*
3703  *   object: 'pipe'
3704  *   name: (A | B | C)
3705  *   source: (none | plane1 | plane2 | pf)
3706  *   wsp: (#0x20 | #0x9 | #0xA)+
3707  *
3708  * eg.:
3709  *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
3710  *  "pipe A none"    ->  Stop CRC
3711  */
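/*
 * Illustrative shell usage (assuming debugfs is mounted at
 * /sys/kernel/debug and the device is DRM minor 0; the per-pipe result
 * file is assumed to follow the i915_pipe_<name>_crc naming of the pipe
 * CRC data files):
 *
 *   echo "pipe A plane1" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 *   cat /sys/kernel/debug/dri/0/i915_pipe_A_crc
 *   echo "pipe A none" > /sys/kernel/debug/dri/0/i915_display_crc_ctl
 */
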
3712 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
3713 {
3714 	int n_words = 0;
3715 
3716 	while (*buf) {
3717 		char *end;
3718 
3719 		/* skip leading white space */
3720 		buf = skip_spaces(buf);
3721 		if (!*buf)
3722 			break;	/* end of buffer */
3723 
3724 		/* find end of word */
3725 		for (end = buf; *end && !isspace(*end); end++)
3726 			;
3727 
3728 		if (n_words == max_words) {
3729 			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
3730 					 max_words);
3731 			return -EINVAL;	/* ran out of words[] before bytes */
3732 		}
3733 
3734 		if (*end)
3735 			*end++ = '\0';
3736 		words[n_words++] = buf;
3737 		buf = end;
3738 	}
3739 
3740 	return n_words;
3741 }
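
/*
 * Worked example (illustrative): given the writable buffer "  pipe A none"
 * and max_words == 3, the loop above NUL-terminates each word in place and
 * returns 3 with words[] == { "pipe", "A", "none" }; a fourth word would
 * trip the n_words == max_words check and yield -EINVAL.
 */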
3742 
3743 enum intel_pipe_crc_object {
3744 	PIPE_CRC_OBJECT_PIPE,
3745 };
3746 
3747 static const char * const pipe_crc_objects[] = {
3748 	"pipe",
3749 };
3750 
3751 static int
3752 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
3753 {
3754 	int i;
3755 
3756 	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
3757 		if (!strcmp(buf, pipe_crc_objects[i])) {
3758 			*o = i;
3759 			return 0;
3760 		}
3761 
3762 	return -EINVAL;
3763 }
3764 
3765 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
3766 {
3767 	const char name = buf[0];
3768 
3769 	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
3770 		return -EINVAL;
3771 
3772 	*pipe = name - 'A';
3773 
3774 	return 0;
3775 }
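
/* Illustrative: "B" parses as 'B' - 'A' == 1 == PIPE_B. */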
3776 
3777 static int
3778 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
3779 {
3780 	int i;
3781 
3782 	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
3783 		if (!strcmp(buf, pipe_crc_sources[i])) {
3784 			*s = i;
3785 			return 0;
3786 		}
3787 
3788 	return -EINVAL;
3789 }
3790 
3791 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
3792 {
3793 #define N_WORDS 3
3794 	int n_words;
3795 	char *words[N_WORDS];
3796 	enum pipe pipe;
3797 	enum intel_pipe_crc_object object;
3798 	enum intel_pipe_crc_source source;
3799 
3800 	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
3801 	if (n_words != N_WORDS) {
3802 		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
3803 				 N_WORDS);
3804 		return -EINVAL;
3805 	}
3806 
3807 	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
3808 		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
3809 		return -EINVAL;
3810 	}
3811 
3812 	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
3813 		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
3814 		return -EINVAL;
3815 	}
3816 
3817 	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
3818 		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
3819 		return -EINVAL;
3820 	}
3821 
3822 	return pipe_crc_set_source(dev, pipe, source);
3823 }
3824 
3825 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
3826 				     size_t len, loff_t *offp)
3827 {
3828 	struct seq_file *m = file->private_data;
3829 	struct drm_device *dev = m->private;
3830 	char *tmpbuf;
3831 	int ret;
3832 
3833 	if (len == 0)
3834 		return 0;
3835 
3836 	if (len > PAGE_SIZE - 1) {
3837 		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
3838 				 PAGE_SIZE);
3839 		return -E2BIG;
3840 	}
3841 
3842 	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
3843 	if (!tmpbuf)
3844 		return -ENOMEM;
3845 
3846 	if (copy_from_user(tmpbuf, ubuf, len)) {
3847 		ret = -EFAULT;
3848 		goto out;
3849 	}
3850 	tmpbuf[len] = '\0';
3851 
3852 	ret = display_crc_ctl_parse(dev, tmpbuf, len);
3853 
3854 out:
3855 	kfree(tmpbuf);
3856 	if (ret < 0)
3857 		return ret;
3858 
3859 	*offp += len;
3860 	return len;
3861 }
3862 
3863 static const struct file_operations i915_display_crc_ctl_fops = {
3864 	.owner = THIS_MODULE,
3865 	.open = display_crc_ctl_open,
3866 	.read = seq_read,
3867 	.llseek = seq_lseek,
3868 	.release = single_release,
3869 	.write = display_crc_ctl_write
3870 };
3871 
3872 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3873 {
3874 	struct drm_device *dev = m->private;
3875 	int num_levels = ilk_wm_max_level(dev) + 1;
3876 	int level;
3877 
3878 	drm_modeset_lock_all(dev);
3879 
3880 	for (level = 0; level < num_levels; level++) {
3881 		unsigned int latency = wm[level];
3882 
3883 		/*
3884 		 * WM1+ latency values are in 0.5us units and gen9 latencies
3885 		 * are in us; scale both to 0.1us units for printing below.
3886 		 */
3887 		if (INTEL_INFO(dev)->gen >= 9)
3888 			latency *= 10;
3889 		else if (level > 0)
3890 			latency *= 5;
3891 
3892 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3893 			   level, wm[level], latency / 10, latency % 10);
3894 	}
3895 
3896 	drm_modeset_unlock_all(dev);
3897 }
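
/*
 * Worked example (illustrative): with a pre-gen9 WM1 raw value of 4
 * (0.5us units), latency becomes 4 * 5 == 20 in 0.1us units and the
 * seq_printf() above emits "WM1 4 (2.0 usec)".
 */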
3898 
3899 static int pri_wm_latency_show(struct seq_file *m, void *data)
3900 {
3901 	struct drm_device *dev = m->private;
3902 	struct drm_i915_private *dev_priv = dev->dev_private;
3903 	const uint16_t *latencies;
3904 
3905 	if (INTEL_INFO(dev)->gen >= 9)
3906 		latencies = dev_priv->wm.skl_latency;
3907 	else
3908 		latencies = to_i915(dev)->wm.pri_latency;
3909 
3910 	wm_latency_show(m, latencies);
3911 
3912 	return 0;
3913 }
3914 
3915 static int spr_wm_latency_show(struct seq_file *m, void *data)
3916 {
3917 	struct drm_device *dev = m->private;
3918 	struct drm_i915_private *dev_priv = dev->dev_private;
3919 	const uint16_t *latencies;
3920 
3921 	if (INTEL_INFO(dev)->gen >= 9)
3922 		latencies = dev_priv->wm.skl_latency;
3923 	else
3924 		latencies = to_i915(dev)->wm.spr_latency;
3925 
3926 	wm_latency_show(m, latencies);
3927 
3928 	return 0;
3929 }
3930 
3931 static int cur_wm_latency_show(struct seq_file *m, void *data)
3932 {
3933 	struct drm_device *dev = m->private;
3934 	struct drm_i915_private *dev_priv = dev->dev_private;
3935 	const uint16_t *latencies;
3936 
3937 	if (INTEL_INFO(dev)->gen >= 9)
3938 		latencies = dev_priv->wm.skl_latency;
3939 	else
3940 		latencies = to_i915(dev)->wm.cur_latency;
3941 
3942 	wm_latency_show(m, latencies);
3943 
3944 	return 0;
3945 }
3946 
3947 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3948 {
3949 	struct drm_device *dev = inode->i_private;
3950 
3951 	if (HAS_GMCH_DISPLAY(dev))
3952 		return -ENODEV;
3953 
3954 	return single_open(file, pri_wm_latency_show, dev);
3955 }
3956 
3957 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3958 {
3959 	struct drm_device *dev = inode->i_private;
3960 
3961 	if (HAS_GMCH_DISPLAY(dev))
3962 		return -ENODEV;
3963 
3964 	return single_open(file, spr_wm_latency_show, dev);
3965 }
3966 
3967 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3968 {
3969 	struct drm_device *dev = inode->i_private;
3970 
3971 	if (HAS_GMCH_DISPLAY(dev))
3972 		return -ENODEV;
3973 
3974 	return single_open(file, cur_wm_latency_show, dev);
3975 }
3976 
3977 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3978 				size_t len, loff_t *offp, uint16_t wm[8])
3979 {
3980 	struct seq_file *m = file->private_data;
3981 	struct drm_device *dev = m->private;
3982 	uint16_t new[8] = { 0 };
3983 	int num_levels = ilk_wm_max_level(dev) + 1;
3984 	int level;
3985 	int ret;
3986 	char tmp[32];
3987 
3988 	if (len >= sizeof(tmp))
3989 		return -EINVAL;
3990 
3991 	if (copy_from_user(tmp, ubuf, len))
3992 		return -EFAULT;
3993 
3994 	tmp[len] = '\0';
3995 
3996 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3997 		     &new[0], &new[1], &new[2], &new[3],
3998 		     &new[4], &new[5], &new[6], &new[7]);
3999 	if (ret != num_levels)
4000 		return -EINVAL;
4001 
4002 	drm_modeset_lock_all(dev);
4003 
4004 	for (level = 0; level < num_levels; level++)
4005 		wm[level] = new[level];
4006 
4007 	drm_modeset_unlock_all(dev);
4008 
4009 	return len;
4010 }
4011 
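/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug, DRM minor 0):
 * exactly ilk_wm_max_level() + 1 values must be written, e.g. on a
 * hypothetical five-level platform:
 *
 *   echo "2 4 4 4 4" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *
 * Supplying fewer or more values fails the sscanf() count check above
 * with -EINVAL.
 */
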
4012 
4013 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4014 				    size_t len, loff_t *offp)
4015 {
4016 	struct seq_file *m = file->private_data;
4017 	struct drm_device *dev = m->private;
4018 	struct drm_i915_private *dev_priv = dev->dev_private;
4019 	uint16_t *latencies;
4020 
4021 	if (INTEL_INFO(dev)->gen >= 9)
4022 		latencies = dev_priv->wm.skl_latency;
4023 	else
4024 		latencies = to_i915(dev)->wm.pri_latency;
4025 
4026 	return wm_latency_write(file, ubuf, len, offp, latencies);
4027 }
4028 
4029 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4030 				    size_t len, loff_t *offp)
4031 {
4032 	struct seq_file *m = file->private_data;
4033 	struct drm_device *dev = m->private;
4034 	struct drm_i915_private *dev_priv = dev->dev_private;
4035 	uint16_t *latencies;
4036 
4037 	if (INTEL_INFO(dev)->gen >= 9)
4038 		latencies = dev_priv->wm.skl_latency;
4039 	else
4040 		latencies = to_i915(dev)->wm.spr_latency;
4041 
4042 	return wm_latency_write(file, ubuf, len, offp, latencies);
4043 }
4044 
4045 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4046 				    size_t len, loff_t *offp)
4047 {
4048 	struct seq_file *m = file->private_data;
4049 	struct drm_device *dev = m->private;
4050 	struct drm_i915_private *dev_priv = dev->dev_private;
4051 	uint16_t *latencies;
4052 
4053 	if (INTEL_INFO(dev)->gen >= 9)
4054 		latencies = dev_priv->wm.skl_latency;
4055 	else
4056 		latencies = to_i915(dev)->wm.cur_latency;
4057 
4058 	return wm_latency_write(file, ubuf, len, offp, latencies);
4059 }
4060 
4061 static const struct file_operations i915_pri_wm_latency_fops = {
4062 	.owner = THIS_MODULE,
4063 	.open = pri_wm_latency_open,
4064 	.read = seq_read,
4065 	.llseek = seq_lseek,
4066 	.release = single_release,
4067 	.write = pri_wm_latency_write
4068 };
4069 
4070 static const struct file_operations i915_spr_wm_latency_fops = {
4071 	.owner = THIS_MODULE,
4072 	.open = spr_wm_latency_open,
4073 	.read = seq_read,
4074 	.llseek = seq_lseek,
4075 	.release = single_release,
4076 	.write = spr_wm_latency_write
4077 };
4078 
4079 static const struct file_operations i915_cur_wm_latency_fops = {
4080 	.owner = THIS_MODULE,
4081 	.open = cur_wm_latency_open,
4082 	.read = seq_read,
4083 	.llseek = seq_lseek,
4084 	.release = single_release,
4085 	.write = cur_wm_latency_write
4086 };
4087 
4088 static int
4089 i915_wedged_get(void *data, u64 *val)
4090 {
4091 	struct drm_device *dev = data;
4092 	struct drm_i915_private *dev_priv = dev->dev_private;
4093 
4094 	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
4095 
4096 	return 0;
4097 }
4098 
4099 static int
4100 i915_wedged_set(void *data, u64 val)
4101 {
4102 	struct drm_device *dev = data;
4103 	struct drm_i915_private *dev_priv = dev->dev_private;
4104 
4105 	/*
4106 	 * There is no safeguard against this debugfs entry colliding
4107 	 * with hangcheck calling the same i915_handle_error() in
4108 	 * parallel, causing an explosion. For now we assume that the
4109 	 * test harness is responsible enough not to inject gpu hangs
4110 	 * while it is writing to 'i915_wedged'.
4111 	 */
4112 
4113 	if (i915_reset_in_progress(&dev_priv->gpu_error))
4114 		return -EAGAIN;
4115 
4116 	intel_runtime_pm_get(dev_priv);
4117 
4118 	i915_handle_error(dev, val,
4119 			  "Manually setting wedged to %llu", val);
4120 
4121 	intel_runtime_pm_put(dev_priv);
4122 
4123 	return 0;
4124 }
4125 
4126 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4127 			i915_wedged_get, i915_wedged_set,
4128 			"%llu\n");
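
/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug, DRM minor 0):
 * writing any value injects a GPU hang via i915_handle_error(), while
 * reading reports the current reset_counter:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */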
4129 
4130 static int
4131 i915_ring_stop_get(void *data, u64 *val)
4132 {
4133 	struct drm_device *dev = data;
4134 	struct drm_i915_private *dev_priv = dev->dev_private;
4135 
4136 	*val = dev_priv->gpu_error.stop_rings;
4137 
4138 	return 0;
4139 }
4140 
4141 static int
4142 i915_ring_stop_set(void *data, u64 val)
4143 {
4144 	struct drm_device *dev = data;
4145 	struct drm_i915_private *dev_priv = dev->dev_private;
4146 	int ret;
4147 
4148 	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
4149 
4150 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4151 	if (ret)
4152 		return ret;
4153 
4154 	dev_priv->gpu_error.stop_rings = val;
4155 	mutex_unlock(&dev->struct_mutex);
4156 
4157 	return 0;
4158 }
4159 
4160 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
4161 			i915_ring_stop_get, i915_ring_stop_set,
4162 			"0x%08llx\n");
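
/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug, DRM minor 0):
 * the written value is stored verbatim in gpu_error.stop_rings as a ring
 * mask consumed by the hangcheck/submission code, e.g.:
 *
 *   echo 0x1 > /sys/kernel/debug/dri/0/i915_ring_stop
 */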
4163 
4164 static int
4165 i915_ring_missed_irq_get(void *data, u64 *val)
4166 {
4167 	struct drm_device *dev = data;
4168 	struct drm_i915_private *dev_priv = dev->dev_private;
4169 
4170 	*val = dev_priv->gpu_error.missed_irq_rings;
4171 	return 0;
4172 }
4173 
4174 static int
4175 i915_ring_missed_irq_set(void *data, u64 val)
4176 {
4177 	struct drm_device *dev = data;
4178 	struct drm_i915_private *dev_priv = dev->dev_private;
4179 	int ret;
4180 
4181 	/* Lock against concurrent debugfs callers */
4182 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4183 	if (ret)
4184 		return ret;
4185 	dev_priv->gpu_error.missed_irq_rings = val;
4186 	mutex_unlock(&dev->struct_mutex);
4187 
4188 	return 0;
4189 }
4190 
4191 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4192 			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4193 			"0x%08llx\n");
4194 
4195 static int
4196 i915_ring_test_irq_get(void *data, u64 *val)
4197 {
4198 	struct drm_device *dev = data;
4199 	struct drm_i915_private *dev_priv = dev->dev_private;
4200 
4201 	*val = dev_priv->gpu_error.test_irq_rings;
4202 
4203 	return 0;
4204 }
4205 
4206 static int
4207 i915_ring_test_irq_set(void *data, u64 val)
4208 {
4209 	struct drm_device *dev = data;
4210 	struct drm_i915_private *dev_priv = dev->dev_private;
4211 	int ret;
4212 
4213 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4214 
4215 	/* Lock against concurrent debugfs callers */
4216 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4217 	if (ret)
4218 		return ret;
4219 
4220 	dev_priv->gpu_error.test_irq_rings = val;
4221 	mutex_unlock(&dev->struct_mutex);
4222 
4223 	return 0;
4224 }
4225 
4226 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4227 			i915_ring_test_irq_get, i915_ring_test_irq_set,
4228 			"0x%08llx\n");
4229 
4230 #define DROP_UNBOUND 0x1
4231 #define DROP_BOUND 0x2
4232 #define DROP_RETIRE 0x4
4233 #define DROP_ACTIVE 0x8
4234 #define DROP_ALL (DROP_UNBOUND | \
4235 		  DROP_BOUND | \
4236 		  DROP_RETIRE | \
4237 		  DROP_ACTIVE)
4238 static int
4239 i915_drop_caches_get(void *data, u64 *val)
4240 {
4241 	*val = DROP_ALL;
4242 
4243 	return 0;
4244 }
4245 
4246 static int
4247 i915_drop_caches_set(void *data, u64 val)
4248 {
4249 	struct drm_device *dev = data;
4250 	struct drm_i915_private *dev_priv = dev->dev_private;
4251 	int ret;
4252 
4253 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
4254 
4255 	/* No need to check and wait for gpu resets; libdrm only auto-restarts
4256 	 * ioctls on -EAGAIN, and this is not an ioctl path. */
4257 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4258 	if (ret)
4259 		return ret;
4260 
4261 	if (val & DROP_ACTIVE) {
4262 		ret = i915_gpu_idle(dev);
4263 		if (ret)
4264 			goto unlock;
4265 	}
4266 
4267 	if (val & (DROP_RETIRE | DROP_ACTIVE))
4268 		i915_gem_retire_requests(dev);
4269 
4270 	if (val & DROP_BOUND)
4271 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
4272 
4273 	if (val & DROP_UNBOUND)
4274 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
4275 
4276 unlock:
4277 	mutex_unlock(&dev->struct_mutex);
4278 
4279 	return ret;
4280 }
4281 
4282 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4283 			i915_drop_caches_get, i915_drop_caches_set,
4284 			"0x%08llx\n");
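
/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug, DRM minor 0):
 * the written value is a mask of the DROP_* flags above, and a read
 * returns DROP_ALL; e.g. to idle the GPU and drop everything:
 *
 *   echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */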
4285 
4286 static int
4287 i915_max_freq_get(void *data, u64 *val)
4288 {
4289 	struct drm_device *dev = data;
4290 	struct drm_i915_private *dev_priv = dev->dev_private;
4291 	int ret;
4292 
4293 	if (INTEL_INFO(dev)->gen < 6)
4294 		return -ENODEV;
4295 
4296 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4297 
4298 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4299 	if (ret)
4300 		return ret;
4301 
4302 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
4303 	mutex_unlock(&dev_priv->rps.hw_lock);
4304 
4305 	return 0;
4306 }
4307 
4308 static int
4309 i915_max_freq_set(void *data, u64 val)
4310 {
4311 	struct drm_device *dev = data;
4312 	struct drm_i915_private *dev_priv = dev->dev_private;
4313 	u32 hw_max, hw_min;
4314 	int ret;
4315 
4316 	if (INTEL_INFO(dev)->gen < 6)
4317 		return -ENODEV;
4318 
4319 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4320 
4321 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
4322 
4323 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4324 	if (ret)
4325 		return ret;
4326 
4327 	/*
4328 	 * Turbo will still be enabled, but won't go above the set value.
4329 	 */
4330 	val = intel_freq_opcode(dev_priv, val);
4331 
4332 	hw_max = dev_priv->rps.max_freq;
4333 	hw_min = dev_priv->rps.min_freq;
4334 
4335 	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
4336 		mutex_unlock(&dev_priv->rps.hw_lock);
4337 		return -EINVAL;
4338 	}
4339 
4340 	dev_priv->rps.max_freq_softlimit = val;
4341 
4342 	intel_set_rps(dev, val);
4343 
4344 	mutex_unlock(&dev_priv->rps.hw_lock);
4345 
4346 	return 0;
4347 }
4348 
4349 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
4350 			i915_max_freq_get, i915_max_freq_set,
4351 			"%llu\n");
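
/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug, DRM minor 0):
 * values are taken in the units reported by intel_gpu_freq() (MHz on
 * most platforms) and converted back via intel_freq_opcode(); the
 * i915_min_freq file below behaves symmetrically:
 *
 *   echo 500 > /sys/kernel/debug/dri/0/i915_max_freq
 */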
4352 
4353 static int
4354 i915_min_freq_get(void *data, u64 *val)
4355 {
4356 	struct drm_device *dev = data;
4357 	struct drm_i915_private *dev_priv = dev->dev_private;
4358 	int ret;
4359 
4360 	if (INTEL_INFO(dev)->gen < 6)
4361 		return -ENODEV;
4362 
4363 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4364 
4365 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4366 	if (ret)
4367 		return ret;
4368 
4369 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
4370 	mutex_unlock(&dev_priv->rps.hw_lock);
4371 
4372 	return 0;
4373 }
4374 
4375 static int
4376 i915_min_freq_set(void *data, u64 val)
4377 {
4378 	struct drm_device *dev = data;
4379 	struct drm_i915_private *dev_priv = dev->dev_private;
4380 	u32 hw_max, hw_min;
4381 	int ret;
4382 
4383 	if (INTEL_INFO(dev)->gen < 6)
4384 		return -ENODEV;
4385 
4386 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4387 
4388 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
4389 
4390 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4391 	if (ret)
4392 		return ret;
4393 
4394 	/*
4395 	 * Turbo will still be enabled, but won't go below the set value.
4396 	 */
4397 	val = intel_freq_opcode(dev_priv, val);
4398 
4399 	hw_max = dev_priv->rps.max_freq;
4400 	hw_min = dev_priv->rps.min_freq;
4401 
4402 	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
4403 		mutex_unlock(&dev_priv->rps.hw_lock);
4404 		return -EINVAL;
4405 	}
4406 
4407 	dev_priv->rps.min_freq_softlimit = val;
4408 
4409 	intel_set_rps(dev, val);
4410 
4411 	mutex_unlock(&dev_priv->rps.hw_lock);
4412 
4413 	return 0;
4414 }
4415 
4416 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
4417 			i915_min_freq_get, i915_min_freq_set,
4418 			"%llu\n");
4419 
4420 static int
4421 i915_cache_sharing_get(void *data, u64 *val)
4422 {
4423 	struct drm_device *dev = data;
4424 	struct drm_i915_private *dev_priv = dev->dev_private;
4425 	u32 snpcr;
4426 	int ret;
4427 
4428 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
4429 		return -ENODEV;
4430 
4431 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4432 	if (ret)
4433 		return ret;
4434 	intel_runtime_pm_get(dev_priv);
4435 
4436 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4437 
4438 	intel_runtime_pm_put(dev_priv);
4439 	mutex_unlock(&dev->struct_mutex);
4440 
4441 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4442 
4443 	return 0;
4444 }
4445 
4446 static int
4447 i915_cache_sharing_set(void *data, u64 val)
4448 {
4449 	struct drm_device *dev = data;
4450 	struct drm_i915_private *dev_priv = dev->dev_private;
4451 	u32 snpcr;
4452 
4453 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
4454 		return -ENODEV;
4455 
4456 	if (val > 3)
4457 		return -EINVAL;
4458 
4459 	intel_runtime_pm_get(dev_priv);
4460 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4461 
4462 	/* Update the cache sharing policy here as well */
4463 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4464 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
4465 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4466 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4467 
4468 	intel_runtime_pm_put(dev_priv);
4469 	return 0;
4470 }
4471 
4472 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4473 			i915_cache_sharing_get, i915_cache_sharing_set,
4474 			"%llu\n");
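
/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug, DRM minor 0):
 * only values 0..3 fit the GEN6_MBC_SNPCR field, anything larger is
 * rejected with -EINVAL:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */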
4475 
4476 static int i915_sseu_status(struct seq_file *m, void *unused)
4477 {
4478 	struct drm_info_node *node = m->private;
4479 	struct drm_device *dev = node->minor->dev;
4480 	struct drm_i915_private *dev_priv = dev->dev_private;
4481 	unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
4482 
4483 	if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
4484 		return -ENODEV;
4485 
4486 	seq_puts(m, "SSEU Device Info\n");
4487 	seq_printf(m, "  Available Slice Total: %u\n",
4488 		   INTEL_INFO(dev)->slice_total);
4489 	seq_printf(m, "  Available Subslice Total: %u\n",
4490 		   INTEL_INFO(dev)->subslice_total);
4491 	seq_printf(m, "  Available Subslice Per Slice: %u\n",
4492 		   INTEL_INFO(dev)->subslice_per_slice);
4493 	seq_printf(m, "  Available EU Total: %u\n",
4494 		   INTEL_INFO(dev)->eu_total);
4495 	seq_printf(m, "  Available EU Per Subslice: %u\n",
4496 		   INTEL_INFO(dev)->eu_per_subslice);
4497 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4498 		   yesno(INTEL_INFO(dev)->has_slice_pg));
4499 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4500 		   yesno(INTEL_INFO(dev)->has_subslice_pg));
4501 	seq_printf(m, "  Has EU Power Gating: %s\n",
4502 		   yesno(INTEL_INFO(dev)->has_eu_pg));
4503 
4504 	seq_puts(m, "SSEU Device Status\n");
4505 	if (IS_CHERRYVIEW(dev)) {
4506 		const int ss_max = 2;
4507 		int ss;
4508 		u32 sig1[ss_max], sig2[ss_max];
4509 
4510 		sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4511 		sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4512 		sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4513 		sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4514 
4515 		for (ss = 0; ss < ss_max; ss++) {
4516 			unsigned int eu_cnt;
4517 
4518 			if (sig1[ss] & CHV_SS_PG_ENABLE)
4519 				/* skip disabled subslice */
4520 				continue;
4521 
4522 			s_tot = 1;
4523 			ss_per++;
4524 			eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4525 				 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4526 				 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4527 				 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4528 			eu_tot += eu_cnt;
4529 			eu_per = max(eu_per, eu_cnt);
4530 		}
4531 		ss_tot = ss_per;
4532 	} else if (IS_SKYLAKE(dev)) {
4533 		const int s_max = 3, ss_max = 4;
4534 		int s, ss;
4535 		u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
4536 
4537 		s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
4538 		s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
4539 		s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
4540 		eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
4541 		eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
4542 		eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
4543 		eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
4544 		eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
4545 		eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
4546 		eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4547 			     GEN9_PGCTL_SSA_EU19_ACK |
4548 			     GEN9_PGCTL_SSA_EU210_ACK |
4549 			     GEN9_PGCTL_SSA_EU311_ACK;
4550 		eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4551 			     GEN9_PGCTL_SSB_EU19_ACK |
4552 			     GEN9_PGCTL_SSB_EU210_ACK |
4553 			     GEN9_PGCTL_SSB_EU311_ACK;
4554 
4555 		for (s = 0; s < s_max; s++) {
4556 			if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4557 				/* skip disabled slice */
4558 				continue;
4559 
4560 			s_tot++;
4561 			ss_per = INTEL_INFO(dev)->subslice_per_slice;
4562 			ss_tot += ss_per;
4563 			for (ss = 0; ss < ss_max; ss++) {
4564 				unsigned int eu_cnt;
4565 
4566 				eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4567 						       eu_mask[ss%2]);
4568 				eu_tot += eu_cnt;
4569 				eu_per = max(eu_per, eu_cnt);
4570 			}
4571 		}
4572 	}
4573 	seq_printf(m, "  Enabled Slice Total: %u\n", s_tot);
4574 	seq_printf(m, "  Enabled Subslice Total: %u\n", ss_tot);
4575 	seq_printf(m, "  Enabled Subslice Per Slice: %u\n", ss_per);
4576 	seq_printf(m, "  Enabled EU Total: %u\n", eu_tot);
4577 	seq_printf(m, "  Enabled EU Per Subslice: %u\n", eu_per);
4578 
4579 	return 0;
4580 }
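
/*
 * Worked example (illustrative): in the CHV branch above each set
 * CHV_EU*_PG_ENABLE bit power-gates a pair of EUs, so a subslice with
 * only CHV_EU19_PG_ENABLE set contributes eu_cnt = 2 + 0 + 2 + 2 == 6
 * enabled EUs.
 */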
4581 
4582 static int i915_forcewake_open(struct inode *inode, struct file *file)
4583 {
4584 	struct drm_device *dev = inode->i_private;
4585 	struct drm_i915_private *dev_priv = dev->dev_private;
4586 
4587 	if (INTEL_INFO(dev)->gen < 6)
4588 		return 0;
4589 
4590 	intel_runtime_pm_get(dev_priv);
4591 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4592 
4593 	return 0;
4594 }
4595 
4596 static int i915_forcewake_release(struct inode *inode, struct file *file)
4597 {
4598 	struct drm_device *dev = inode->i_private;
4599 	struct drm_i915_private *dev_priv = dev->dev_private;
4600 
4601 	if (INTEL_INFO(dev)->gen < 6)
4602 		return 0;
4603 
4604 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4605 	intel_runtime_pm_put(dev_priv);
4606 
4607 	return 0;
4608 }
4609 
4610 static const struct file_operations i915_forcewake_fops = {
4611 	.owner = THIS_MODULE,
4612 	.open = i915_forcewake_open,
4613 	.release = i915_forcewake_release,
4614 };
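
/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug, DRM minor 0):
 * forcewake is held for as long as the file stays open (there are no
 * read/write ops, and the file is created read-only), e.g. from a shell:
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user
 *   ... poke registers ...
 *   exec 3<&-
 */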
4615 
4616 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
4617 {
4618 	struct drm_device *dev = minor->dev;
4619 	struct dentry *ent;
4620 
4621 	ent = debugfs_create_file("i915_forcewake_user",
4622 				  S_IRUSR,
4623 				  root, dev,
4624 				  &i915_forcewake_fops);
4625 	if (!ent)
4626 		return -ENOMEM;
4627 
4628 	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
4629 }
4630 
4631 static int i915_debugfs_create(struct dentry *root,
4632 			       struct drm_minor *minor,
4633 			       const char *name,
4634 			       const struct file_operations *fops)
4635 {
4636 	struct drm_device *dev = minor->dev;
4637 	struct dentry *ent;
4638 
4639 	ent = debugfs_create_file(name,
4640 				  S_IRUGO | S_IWUSR,
4641 				  root, dev,
4642 				  fops);
4643 	if (!ent)
4644 		return -ENOMEM;
4645 
4646 	return drm_add_fake_info_node(minor, ent, fops);
4647 }
4648 
4649 static const struct drm_info_list i915_debugfs_list[] = {
4650 	{"i915_capabilities", i915_capabilities, 0},
4651 	{"i915_gem_objects", i915_gem_object_info, 0},
4652 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
4653 	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
4654 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
4655 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
4656 	{"i915_gem_stolen", i915_gem_stolen_list_info, 0},
4657 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
4658 	{"i915_gem_request", i915_gem_request_info, 0},
4659 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
4660 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4661 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4662 	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
4663 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
4664 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
4665 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
4666 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4667 	{"i915_frequency_info", i915_frequency_info, 0},
4668 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4669 	{"i915_drpc_info", i915_drpc_info, 0},
4670 	{"i915_emon_status", i915_emon_status, 0},
4671 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4672 	{"i915_fbc_status", i915_fbc_status, 0},
4673 	{"i915_ips_status", i915_ips_status, 0},
4674 	{"i915_sr_status", i915_sr_status, 0},
4675 	{"i915_opregion", i915_opregion, 0},
4676 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4677 	{"i915_context_status", i915_context_status, 0},
4678 	{"i915_dump_lrc", i915_dump_lrc, 0},
4679 	{"i915_execlists", i915_execlists, 0},
4680 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4681 	{"i915_swizzle_info", i915_swizzle_info, 0},
4682 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
4683 	{"i915_llc", i915_llc, 0},
4684 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4685 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
4686 	{"i915_energy_uJ", i915_energy_uJ, 0},
4687 	{"i915_pc8_status", i915_pc8_status, 0},
4688 	{"i915_power_domain_info", i915_power_domain_info, 0},
4689 	{"i915_display_info", i915_display_info, 0},
4690 	{"i915_semaphore_status", i915_semaphore_status, 0},
4691 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4692 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4693 	{"i915_wa_registers", i915_wa_registers, 0},
4694 	{"i915_ddb_info", i915_ddb_info, 0},
4695 	{"i915_sseu_status", i915_sseu_status, 0},
4696 	{"i915_drrs_status", i915_drrs_status, 0},
4697 };
4698 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4699 
4700 static const struct i915_debugfs_files {
4701 	const char *name;
4702 	const struct file_operations *fops;
4703 } i915_debugfs_files[] = {
4704 	{"i915_wedged", &i915_wedged_fops},
4705 	{"i915_max_freq", &i915_max_freq_fops},
4706 	{"i915_min_freq", &i915_min_freq_fops},
4707 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4708 	{"i915_ring_stop", &i915_ring_stop_fops},
4709 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4710 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
4711 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4712 	{"i915_error_state", &i915_error_state_fops},
4713 	{"i915_next_seqno", &i915_next_seqno_fops},
4714 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
4715 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4716 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4717 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4718 	{"i915_fbc_false_color", &i915_fbc_fc_fops},
4719 };
4720 
4721 void intel_display_crc_init(struct drm_device *dev)
4722 {
4723 	struct drm_i915_private *dev_priv = dev->dev_private;
4724 	enum pipe pipe;
4725 
4726 	for_each_pipe(dev_priv, pipe) {
4727 		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
4728 
4729 		pipe_crc->opened = false;
4730 		spin_lock_init(&pipe_crc->lock);
4731 		init_waitqueue_head(&pipe_crc->wq);
4732 	}
4733 }
4734 
4735 int i915_debugfs_init(struct drm_minor *minor)
4736 {
4737 	int ret, i;
4738 
4739 	ret = i915_forcewake_create(minor->debugfs_root, minor);
4740 	if (ret)
4741 		return ret;
4742 
4743 	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
4744 		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
4745 		if (ret)
4746 			return ret;
4747 	}
4748 
4749 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4750 		ret = i915_debugfs_create(minor->debugfs_root, minor,
4751 					  i915_debugfs_files[i].name,
4752 					  i915_debugfs_files[i].fops);
4753 		if (ret)
4754 			return ret;
4755 	}
4756 
4757 	return drm_debugfs_create_files(i915_debugfs_list,
4758 					I915_DEBUGFS_ENTRIES,
4759 					minor->debugfs_root, minor);
4760 }
4761 
4762 void i915_debugfs_cleanup(struct drm_minor *minor)
4763 {
4764 	int i;
4765 
4766 	drm_debugfs_remove_files(i915_debugfs_list,
4767 				 I915_DEBUGFS_ENTRIES, minor);
4768 
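	/*
	 * The fops pointer served as the fake info node key in
	 * drm_add_fake_info_node(), so the same pointer identifies the
	 * entry for removal here and in the i915_debugfs_files loop below.
	 */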
4769 	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
4770 				 1, minor);
4771 
4772 	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
4773 		struct drm_info_list *info_list =
4774 			(struct drm_info_list *)&i915_pipe_crc_data[i];
4775 
4776 		drm_debugfs_remove_files(info_list, 1, minor);
4777 	}
4778 
4779 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4780 		struct drm_info_list *info_list =
4781 			(struct drm_info_list *) i915_debugfs_files[i].fops;
4782 
4783 		drm_debugfs_remove_files(info_list, 1, minor);
4784 	}
4785 }
4786