1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28 
29 #include <linux/seq_file.h>
30 #include <linux/circ_buf.h>
31 #include <linux/ctype.h>
32 #include <linux/debugfs.h>
33 #include <linux/slab.h>
34 #include <linux/export.h>
35 #include <linux/list_sort.h>
36 #include <asm/msr-index.h>
37 #include <drm/drmP.h>
38 #include "intel_drv.h"
39 #include "intel_ringbuffer.h"
40 #include <drm/i915_drm.h>
41 #include "i915_drv.h"
42 
43 enum {
44 	ACTIVE_LIST,
45 	INACTIVE_LIST,
46 	PINNED_LIST,
47 };
48 
49 static const char *yesno(int v)
50 {
51 	return v ? "yes" : "no";
52 }
53 
54 /* As the drm_debugfs_init() routines are called before dev->dev_private is
55  * allocated we need to hook into the minor for release. */
56 static int
57 drm_add_fake_info_node(struct drm_minor *minor,
58 		       struct dentry *ent,
59 		       const void *key)
60 {
61 	struct drm_info_node *node;
62 
63 	node = kmalloc(sizeof(*node), GFP_KERNEL);
64 	if (node == NULL) {
65 		debugfs_remove(ent);
66 		return -ENOMEM;
67 	}
68 
69 	node->minor = minor;
70 	node->dent = ent;
71 	node->info_ent = (void *) key;
72 
73 	mutex_lock(&minor->debugfs_lock);
74 	list_add(&node->list, &minor->debugfs_list);
75 	mutex_unlock(&minor->debugfs_lock);
76 
77 	return 0;
78 }
79 
80 static int i915_capabilities(struct seq_file *m, void *data)
81 {
82 	struct drm_info_node *node = m->private;
83 	struct drm_device *dev = node->minor->dev;
84 	const struct intel_device_info *info = INTEL_INFO(dev);
85 
86 	seq_printf(m, "gen: %d\n", info->gen);
87 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
88 #define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
89 #define SEP_SEMICOLON ;
90 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
91 #undef PRINT_FLAG
92 #undef SEP_SEMICOLON
93 
94 	return 0;
95 }
96 
97 static const char *get_pin_flag(struct drm_i915_gem_object *obj)
98 {
99 	if (i915_gem_obj_is_pinned(obj))
100 		return "p";
101 	else
102 		return " ";
103 }
104 
105 static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
106 {
107 	switch (obj->tiling_mode) {
108 	default:
109 	case I915_TILING_NONE: return " ";
110 	case I915_TILING_X: return "X";
111 	case I915_TILING_Y: return "Y";
112 	}
113 }
114 
115 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
116 {
117 	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
118 }
119 
120 static void
121 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
122 {
123 	struct i915_vma *vma;
124 	int pin_count = 0;
125 
126 	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %x %x %x%s%s%s",
127 		   &obj->base,
128 		   get_pin_flag(obj),
129 		   get_tiling_flag(obj),
130 		   get_global_flag(obj),
131 		   obj->base.size / 1024,
132 		   obj->base.read_domains,
133 		   obj->base.write_domain,
134 		   i915_gem_request_get_seqno(obj->last_read_req),
135 		   i915_gem_request_get_seqno(obj->last_write_req),
136 		   i915_gem_request_get_seqno(obj->last_fenced_req),
137 		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
138 		   obj->dirty ? " dirty" : "",
139 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
140 	if (obj->base.name)
141 		seq_printf(m, " (name: %d)", obj->base.name);
142 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
143 		if (vma->pin_count > 0)
144 			pin_count++;
145 	}
146 	seq_printf(m, " (pinned x %d)", pin_count);
147 	if (obj->pin_display)
148 		seq_printf(m, " (display)");
149 	if (obj->fence_reg != I915_FENCE_REG_NONE)
150 		seq_printf(m, " (fence: %d)", obj->fence_reg);
151 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
152 		if (!i915_is_ggtt(vma->vm))
153 			seq_puts(m, " (pp");
154 		else
155 			seq_puts(m, " (g");
156 		seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
157 			   vma->node.start, vma->node.size,
158 			   vma->ggtt_view.type);
159 	}
160 	if (obj->stolen)
161 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
162 	if (obj->pin_mappable || obj->fault_mappable) {
163 		char s[3], *t = s;
164 		if (obj->pin_mappable)
165 			*t++ = 'p';
166 		if (obj->fault_mappable)
167 			*t++ = 'f';
168 		*t = '\0';
169 		seq_printf(m, " (%s mappable)", s);
170 	}
171 	if (obj->last_read_req != NULL)
172 		seq_printf(m, " (%s)",
173 			   i915_gem_request_get_ring(obj->last_read_req)->name);
174 	if (obj->frontbuffer_bits)
175 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
176 }
177 
178 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
179 {
180 	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
181 	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
182 	seq_putc(m, ' ');
183 }
184 
185 static int i915_gem_object_list_info(struct seq_file *m, void *data)
186 {
187 	struct drm_info_node *node = m->private;
188 	uintptr_t list = (uintptr_t) node->info_ent->data;
189 	struct list_head *head;
190 	struct drm_device *dev = node->minor->dev;
191 	struct drm_i915_private *dev_priv = dev->dev_private;
192 	struct i915_address_space *vm = &dev_priv->gtt.base;
193 	struct i915_vma *vma;
194 	size_t total_obj_size, total_gtt_size;
195 	int count, ret;
196 
197 	ret = mutex_lock_interruptible(&dev->struct_mutex);
198 	if (ret)
199 		return ret;
200 
201 	/* FIXME: the user of this interface might want more than just GGTT */
202 	switch (list) {
203 	case ACTIVE_LIST:
204 		seq_puts(m, "Active:\n");
205 		head = &vm->active_list;
206 		break;
207 	case INACTIVE_LIST:
208 		seq_puts(m, "Inactive:\n");
209 		head = &vm->inactive_list;
210 		break;
211 	default:
212 		mutex_unlock(&dev->struct_mutex);
213 		return -EINVAL;
214 	}
215 
216 	total_obj_size = total_gtt_size = count = 0;
217 	list_for_each_entry(vma, head, mm_list) {
218 		seq_printf(m, "   ");
219 		describe_obj(m, vma->obj);
220 		seq_printf(m, "\n");
221 		total_obj_size += vma->obj->base.size;
222 		total_gtt_size += vma->node.size;
223 		count++;
224 	}
225 	mutex_unlock(&dev->struct_mutex);
226 
227 	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
228 		   count, total_obj_size, total_gtt_size);
229 	return 0;
230 }
231 
232 static int obj_rank_by_stolen(void *priv,
233 			      struct list_head *A, struct list_head *B)
234 {
235 	struct drm_i915_gem_object *a =
236 		container_of(A, struct drm_i915_gem_object, obj_exec_link);
237 	struct drm_i915_gem_object *b =
238 		container_of(B, struct drm_i915_gem_object, obj_exec_link);
239 
240 	return a->stolen->start - b->stolen->start;
241 }
242 
243 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
244 {
245 	struct drm_info_node *node = m->private;
246 	struct drm_device *dev = node->minor->dev;
247 	struct drm_i915_private *dev_priv = dev->dev_private;
248 	struct drm_i915_gem_object *obj;
249 	size_t total_obj_size, total_gtt_size;
250 	LIST_HEAD(stolen);
251 	int count, ret;
252 
253 	ret = mutex_lock_interruptible(&dev->struct_mutex);
254 	if (ret)
255 		return ret;
256 
257 	total_obj_size = total_gtt_size = count = 0;
258 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
259 		if (obj->stolen == NULL)
260 			continue;
261 
262 		list_add(&obj->obj_exec_link, &stolen);
263 
264 		total_obj_size += obj->base.size;
265 		total_gtt_size += i915_gem_obj_ggtt_size(obj);
266 		count++;
267 	}
268 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
269 		if (obj->stolen == NULL)
270 			continue;
271 
272 		list_add(&obj->obj_exec_link, &stolen);
273 
274 		total_obj_size += obj->base.size;
275 		count++;
276 	}
277 	list_sort(NULL, &stolen, obj_rank_by_stolen);
278 	seq_puts(m, "Stolen:\n");
279 	while (!list_empty(&stolen)) {
280 		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
281 		seq_puts(m, "   ");
282 		describe_obj(m, obj);
283 		seq_putc(m, '\n');
284 		list_del_init(&obj->obj_exec_link);
285 	}
286 	mutex_unlock(&dev->struct_mutex);
287 
288 	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
289 		   count, total_obj_size, total_gtt_size);
290 	return 0;
291 }
292 
293 #define count_objects(list, member) do { \
294 	list_for_each_entry(obj, list, member) { \
295 		size += i915_gem_obj_ggtt_size(obj); \
296 		++count; \
297 		if (obj->map_and_fenceable) { \
298 			mappable_size += i915_gem_obj_ggtt_size(obj); \
299 			++mappable_count; \
300 		} \
301 	} \
302 } while (0)
303 
304 struct file_stats {
305 	struct drm_i915_file_private *file_priv;
306 	int count;
307 	size_t total, unbound;
308 	size_t global, shared;
309 	size_t active, inactive;
310 };
311 
312 static int per_file_stats(int id, void *ptr, void *data)
313 {
314 	struct drm_i915_gem_object *obj = ptr;
315 	struct file_stats *stats = data;
316 	struct i915_vma *vma;
317 
318 	stats->count++;
319 	stats->total += obj->base.size;
320 
321 	if (obj->base.name || obj->base.dma_buf)
322 		stats->shared += obj->base.size;
323 
324 	if (USES_FULL_PPGTT(obj->base.dev)) {
325 		list_for_each_entry(vma, &obj->vma_list, vma_link) {
326 			struct i915_hw_ppgtt *ppgtt;
327 
328 			if (!drm_mm_node_allocated(&vma->node))
329 				continue;
330 
331 			if (i915_is_ggtt(vma->vm)) {
332 				stats->global += obj->base.size;
333 				continue;
334 			}
335 
336 			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
337 			if (ppgtt->file_priv != stats->file_priv)
338 				continue;
339 
340 			if (obj->active) /* XXX per-vma statistic */
341 				stats->active += obj->base.size;
342 			else
343 				stats->inactive += obj->base.size;
344 
345 			return 0;
346 		}
347 	} else {
348 		if (i915_gem_obj_ggtt_bound(obj)) {
349 			stats->global += obj->base.size;
350 			if (obj->active)
351 				stats->active += obj->base.size;
352 			else
353 				stats->inactive += obj->base.size;
354 			return 0;
355 		}
356 	}
357 
358 	if (!list_empty(&obj->global_list))
359 		stats->unbound += obj->base.size;
360 
361 	return 0;
362 }
363 
364 #define print_file_stats(m, name, stats) \
365 	seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
366 		   name, \
367 		   stats.count, \
368 		   stats.total, \
369 		   stats.active, \
370 		   stats.inactive, \
371 		   stats.global, \
372 		   stats.shared, \
373 		   stats.unbound)
374 
375 static void print_batch_pool_stats(struct seq_file *m,
376 				   struct drm_i915_private *dev_priv)
377 {
378 	struct drm_i915_gem_object *obj;
379 	struct file_stats stats;
380 
381 	memset(&stats, 0, sizeof(stats));
382 
383 	list_for_each_entry(obj,
384 			    &dev_priv->mm.batch_pool.cache_list,
385 			    batch_pool_list)
386 		per_file_stats(0, obj, &stats);
387 
388 	print_file_stats(m, "batch pool", stats);
389 }
390 
391 #define count_vmas(list, member) do { \
392 	list_for_each_entry(vma, list, member) { \
393 		size += i915_gem_obj_ggtt_size(vma->obj); \
394 		++count; \
395 		if (vma->obj->map_and_fenceable) { \
396 			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
397 			++mappable_count; \
398 		} \
399 	} \
400 } while (0)
401 
402 static int i915_gem_object_info(struct seq_file *m, void* data)
403 {
404 	struct drm_info_node *node = m->private;
405 	struct drm_device *dev = node->minor->dev;
406 	struct drm_i915_private *dev_priv = dev->dev_private;
407 	u32 count, mappable_count, purgeable_count;
408 	size_t size, mappable_size, purgeable_size;
409 	struct drm_i915_gem_object *obj;
410 	struct i915_address_space *vm = &dev_priv->gtt.base;
411 	struct drm_file *file;
412 	struct i915_vma *vma;
413 	int ret;
414 
415 	ret = mutex_lock_interruptible(&dev->struct_mutex);
416 	if (ret)
417 		return ret;
418 
419 	seq_printf(m, "%u objects, %zu bytes\n",
420 		   dev_priv->mm.object_count,
421 		   dev_priv->mm.object_memory);
422 
423 	size = count = mappable_size = mappable_count = 0;
424 	count_objects(&dev_priv->mm.bound_list, global_list);
425 	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
426 		   count, mappable_count, size, mappable_size);
427 
428 	size = count = mappable_size = mappable_count = 0;
429 	count_vmas(&vm->active_list, mm_list);
430 	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
431 		   count, mappable_count, size, mappable_size);
432 
433 	size = count = mappable_size = mappable_count = 0;
434 	count_vmas(&vm->inactive_list, mm_list);
435 	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
436 		   count, mappable_count, size, mappable_size);
437 
438 	size = count = purgeable_size = purgeable_count = 0;
439 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
440 		size += obj->base.size, ++count;
441 		if (obj->madv == I915_MADV_DONTNEED)
442 			purgeable_size += obj->base.size, ++purgeable_count;
443 	}
444 	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
445 
446 	size = count = mappable_size = mappable_count = 0;
447 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
448 		if (obj->fault_mappable) {
449 			size += i915_gem_obj_ggtt_size(obj);
450 			++count;
451 		}
452 		if (obj->pin_mappable) {
453 			mappable_size += i915_gem_obj_ggtt_size(obj);
454 			++mappable_count;
455 		}
456 		if (obj->madv == I915_MADV_DONTNEED) {
457 			purgeable_size += obj->base.size;
458 			++purgeable_count;
459 		}
460 	}
461 	seq_printf(m, "%u purgeable objects, %zu bytes\n",
462 		   purgeable_count, purgeable_size);
463 	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
464 		   mappable_count, mappable_size);
465 	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
466 		   count, size);
467 
468 	seq_printf(m, "%zu [%lu] gtt total\n",
469 		   dev_priv->gtt.base.total,
470 		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
471 
472 	seq_putc(m, '\n');
473 	print_batch_pool_stats(m, dev_priv);
474 
475 	seq_putc(m, '\n');
476 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
477 		struct file_stats stats;
478 		struct task_struct *task;
479 
480 		memset(&stats, 0, sizeof(stats));
481 		stats.file_priv = file->driver_priv;
482 		spin_lock(&file->table_lock);
483 		idr_for_each(&file->object_idr, per_file_stats, &stats);
484 		spin_unlock(&file->table_lock);
485 		/*
486 		 * Although we have a valid reference on file->pid, that does
487 		 * not guarantee that the task_struct who called get_pid() is
488 		 * still alive (e.g. get_pid(current) => fork() => exit()).
489 		 * Therefore, we need to protect this ->comm access using RCU.
490 		 */
491 		rcu_read_lock();
492 		task = pid_task(file->pid, PIDTYPE_PID);
493 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
494 		rcu_read_unlock();
495 	}
496 
497 	mutex_unlock(&dev->struct_mutex);
498 
499 	return 0;
500 }
501 
502 static int i915_gem_gtt_info(struct seq_file *m, void *data)
503 {
504 	struct drm_info_node *node = m->private;
505 	struct drm_device *dev = node->minor->dev;
506 	uintptr_t list = (uintptr_t) node->info_ent->data;
507 	struct drm_i915_private *dev_priv = dev->dev_private;
508 	struct drm_i915_gem_object *obj;
509 	size_t total_obj_size, total_gtt_size;
510 	int count, ret;
511 
512 	ret = mutex_lock_interruptible(&dev->struct_mutex);
513 	if (ret)
514 		return ret;
515 
516 	total_obj_size = total_gtt_size = count = 0;
517 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
518 		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
519 			continue;
520 
521 		seq_puts(m, "   ");
522 		describe_obj(m, obj);
523 		seq_putc(m, '\n');
524 		total_obj_size += obj->base.size;
525 		total_gtt_size += i915_gem_obj_ggtt_size(obj);
526 		count++;
527 	}
528 
529 	mutex_unlock(&dev->struct_mutex);
530 
531 	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
532 		   count, total_obj_size, total_gtt_size);
533 
534 	return 0;
535 }
536 
537 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
538 {
539 	struct drm_info_node *node = m->private;
540 	struct drm_device *dev = node->minor->dev;
541 	struct drm_i915_private *dev_priv = dev->dev_private;
542 	struct intel_crtc *crtc;
543 	int ret;
544 
545 	ret = mutex_lock_interruptible(&dev->struct_mutex);
546 	if (ret)
547 		return ret;
548 
549 	for_each_intel_crtc(dev, crtc) {
550 		const char pipe = pipe_name(crtc->pipe);
551 		const char plane = plane_name(crtc->plane);
552 		struct intel_unpin_work *work;
553 
554 		spin_lock_irq(&dev->event_lock);
555 		work = crtc->unpin_work;
556 		if (work == NULL) {
557 			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
558 				   pipe, plane);
559 		} else {
560 			u32 addr;
561 
562 			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
563 				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
564 					   pipe, plane);
565 			} else {
566 				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
567 					   pipe, plane);
568 			}
569 			if (work->flip_queued_req) {
570 				struct intel_engine_cs *ring =
571 					i915_gem_request_get_ring(work->flip_queued_req);
572 
573 				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
574 					   ring->name,
575 					   i915_gem_request_get_seqno(work->flip_queued_req),
576 					   dev_priv->next_seqno,
577 					   ring->get_seqno(ring, true),
578 					   i915_gem_request_completed(work->flip_queued_req, true));
579 			} else
580 				seq_printf(m, "Flip not associated with any ring\n");
581 			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
582 				   work->flip_queued_vblank,
583 				   work->flip_ready_vblank,
584 				   drm_crtc_vblank_count(&crtc->base));
585 			if (work->enable_stall_check)
586 				seq_puts(m, "Stall check enabled, ");
587 			else
588 				seq_puts(m, "Stall check waiting for page flip ioctl, ");
589 			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
590 
591 			if (INTEL_INFO(dev)->gen >= 4)
592 				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
593 			else
594 				addr = I915_READ(DSPADDR(crtc->plane));
595 			seq_printf(m, "Current scanout address 0x%08x\n", addr);
596 
597 			if (work->pending_flip_obj) {
598 				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
599 				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
600 			}
601 		}
602 		spin_unlock_irq(&dev->event_lock);
603 	}
604 
605 	mutex_unlock(&dev->struct_mutex);
606 
607 	return 0;
608 }
609 
610 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
611 {
612 	struct drm_info_node *node = m->private;
613 	struct drm_device *dev = node->minor->dev;
614 	struct drm_i915_private *dev_priv = dev->dev_private;
615 	struct drm_i915_gem_object *obj;
616 	int count = 0;
617 	int ret;
618 
619 	ret = mutex_lock_interruptible(&dev->struct_mutex);
620 	if (ret)
621 		return ret;
622 
623 	seq_puts(m, "cache:\n");
624 	list_for_each_entry(obj,
625 			    &dev_priv->mm.batch_pool.cache_list,
626 			    batch_pool_list) {
627 		seq_puts(m, "   ");
628 		describe_obj(m, obj);
629 		seq_putc(m, '\n');
630 		count++;
631 	}
632 
633 	seq_printf(m, "total: %d\n", count);
634 
635 	mutex_unlock(&dev->struct_mutex);
636 
637 	return 0;
638 }
639 
640 static int i915_gem_request_info(struct seq_file *m, void *data)
641 {
642 	struct drm_info_node *node = m->private;
643 	struct drm_device *dev = node->minor->dev;
644 	struct drm_i915_private *dev_priv = dev->dev_private;
645 	struct intel_engine_cs *ring;
646 	struct drm_i915_gem_request *gem_request;
647 	int ret, count, i;
648 
649 	ret = mutex_lock_interruptible(&dev->struct_mutex);
650 	if (ret)
651 		return ret;
652 
653 	count = 0;
654 	for_each_ring(ring, dev_priv, i) {
655 		if (list_empty(&ring->request_list))
656 			continue;
657 
658 		seq_printf(m, "%s requests:\n", ring->name);
659 		list_for_each_entry(gem_request,
660 				    &ring->request_list,
661 				    list) {
662 			seq_printf(m, "    %x @ %d\n",
663 				   gem_request->seqno,
664 				   (int) (jiffies - gem_request->emitted_jiffies));
665 		}
666 		count++;
667 	}
668 	mutex_unlock(&dev->struct_mutex);
669 
670 	if (count == 0)
671 		seq_puts(m, "No requests\n");
672 
673 	return 0;
674 }
675 
676 static void i915_ring_seqno_info(struct seq_file *m,
677 				 struct intel_engine_cs *ring)
678 {
679 	if (ring->get_seqno) {
680 		seq_printf(m, "Current sequence (%s): %x\n",
681 			   ring->name, ring->get_seqno(ring, false));
682 	}
683 }
684 
685 static int i915_gem_seqno_info(struct seq_file *m, void *data)
686 {
687 	struct drm_info_node *node = m->private;
688 	struct drm_device *dev = node->minor->dev;
689 	struct drm_i915_private *dev_priv = dev->dev_private;
690 	struct intel_engine_cs *ring;
691 	int ret, i;
692 
693 	ret = mutex_lock_interruptible(&dev->struct_mutex);
694 	if (ret)
695 		return ret;
696 	intel_runtime_pm_get(dev_priv);
697 
698 	for_each_ring(ring, dev_priv, i)
699 		i915_ring_seqno_info(m, ring);
700 
701 	intel_runtime_pm_put(dev_priv);
702 	mutex_unlock(&dev->struct_mutex);
703 
704 	return 0;
705 }
706 
707 
708 static int i915_interrupt_info(struct seq_file *m, void *data)
709 {
710 	struct drm_info_node *node = m->private;
711 	struct drm_device *dev = node->minor->dev;
712 	struct drm_i915_private *dev_priv = dev->dev_private;
713 	struct intel_engine_cs *ring;
714 	int ret, i, pipe;
715 
716 	ret = mutex_lock_interruptible(&dev->struct_mutex);
717 	if (ret)
718 		return ret;
719 	intel_runtime_pm_get(dev_priv);
720 
721 	if (IS_CHERRYVIEW(dev)) {
722 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
723 			   I915_READ(GEN8_MASTER_IRQ));
724 
725 		seq_printf(m, "Display IER:\t%08x\n",
726 			   I915_READ(VLV_IER));
727 		seq_printf(m, "Display IIR:\t%08x\n",
728 			   I915_READ(VLV_IIR));
729 		seq_printf(m, "Display IIR_RW:\t%08x\n",
730 			   I915_READ(VLV_IIR_RW));
731 		seq_printf(m, "Display IMR:\t%08x\n",
732 			   I915_READ(VLV_IMR));
733 		for_each_pipe(dev_priv, pipe)
734 			seq_printf(m, "Pipe %c stat:\t%08x\n",
735 				   pipe_name(pipe),
736 				   I915_READ(PIPESTAT(pipe)));
737 
738 		seq_printf(m, "Port hotplug:\t%08x\n",
739 			   I915_READ(PORT_HOTPLUG_EN));
740 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
741 			   I915_READ(VLV_DPFLIPSTAT));
742 		seq_printf(m, "DPINVGTT:\t%08x\n",
743 			   I915_READ(DPINVGTT));
744 
745 		for (i = 0; i < 4; i++) {
746 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
747 				   i, I915_READ(GEN8_GT_IMR(i)));
748 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
749 				   i, I915_READ(GEN8_GT_IIR(i)));
750 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
751 				   i, I915_READ(GEN8_GT_IER(i)));
752 		}
753 
754 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
755 			   I915_READ(GEN8_PCU_IMR));
756 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
757 			   I915_READ(GEN8_PCU_IIR));
758 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
759 			   I915_READ(GEN8_PCU_IER));
760 	} else if (INTEL_INFO(dev)->gen >= 8) {
761 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
762 			   I915_READ(GEN8_MASTER_IRQ));
763 
764 		for (i = 0; i < 4; i++) {
765 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
766 				   i, I915_READ(GEN8_GT_IMR(i)));
767 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
768 				   i, I915_READ(GEN8_GT_IIR(i)));
769 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
770 				   i, I915_READ(GEN8_GT_IER(i)));
771 		}
772 
773 		for_each_pipe(dev_priv, pipe) {
774 			if (!intel_display_power_is_enabled(dev_priv,
775 						POWER_DOMAIN_PIPE(pipe))) {
776 				seq_printf(m, "Pipe %c power disabled\n",
777 					   pipe_name(pipe));
778 				continue;
779 			}
780 			seq_printf(m, "Pipe %c IMR:\t%08x\n",
781 				   pipe_name(pipe),
782 				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
783 			seq_printf(m, "Pipe %c IIR:\t%08x\n",
784 				   pipe_name(pipe),
785 				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
786 			seq_printf(m, "Pipe %c IER:\t%08x\n",
787 				   pipe_name(pipe),
788 				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
789 		}
790 
791 		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
792 			   I915_READ(GEN8_DE_PORT_IMR));
793 		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
794 			   I915_READ(GEN8_DE_PORT_IIR));
795 		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
796 			   I915_READ(GEN8_DE_PORT_IER));
797 
798 		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
799 			   I915_READ(GEN8_DE_MISC_IMR));
800 		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
801 			   I915_READ(GEN8_DE_MISC_IIR));
802 		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
803 			   I915_READ(GEN8_DE_MISC_IER));
804 
805 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
806 			   I915_READ(GEN8_PCU_IMR));
807 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
808 			   I915_READ(GEN8_PCU_IIR));
809 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
810 			   I915_READ(GEN8_PCU_IER));
811 	} else if (IS_VALLEYVIEW(dev)) {
812 		seq_printf(m, "Display IER:\t%08x\n",
813 			   I915_READ(VLV_IER));
814 		seq_printf(m, "Display IIR:\t%08x\n",
815 			   I915_READ(VLV_IIR));
816 		seq_printf(m, "Display IIR_RW:\t%08x\n",
817 			   I915_READ(VLV_IIR_RW));
818 		seq_printf(m, "Display IMR:\t%08x\n",
819 			   I915_READ(VLV_IMR));
820 		for_each_pipe(dev_priv, pipe)
821 			seq_printf(m, "Pipe %c stat:\t%08x\n",
822 				   pipe_name(pipe),
823 				   I915_READ(PIPESTAT(pipe)));
824 
825 		seq_printf(m, "Master IER:\t%08x\n",
826 			   I915_READ(VLV_MASTER_IER));
827 
828 		seq_printf(m, "Render IER:\t%08x\n",
829 			   I915_READ(GTIER));
830 		seq_printf(m, "Render IIR:\t%08x\n",
831 			   I915_READ(GTIIR));
832 		seq_printf(m, "Render IMR:\t%08x\n",
833 			   I915_READ(GTIMR));
834 
835 		seq_printf(m, "PM IER:\t\t%08x\n",
836 			   I915_READ(GEN6_PMIER));
837 		seq_printf(m, "PM IIR:\t\t%08x\n",
838 			   I915_READ(GEN6_PMIIR));
839 		seq_printf(m, "PM IMR:\t\t%08x\n",
840 			   I915_READ(GEN6_PMIMR));
841 
842 		seq_printf(m, "Port hotplug:\t%08x\n",
843 			   I915_READ(PORT_HOTPLUG_EN));
844 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
845 			   I915_READ(VLV_DPFLIPSTAT));
846 		seq_printf(m, "DPINVGTT:\t%08x\n",
847 			   I915_READ(DPINVGTT));
848 
849 	} else if (!HAS_PCH_SPLIT(dev)) {
850 		seq_printf(m, "Interrupt enable:    %08x\n",
851 			   I915_READ(IER));
852 		seq_printf(m, "Interrupt identity:  %08x\n",
853 			   I915_READ(IIR));
854 		seq_printf(m, "Interrupt mask:      %08x\n",
855 			   I915_READ(IMR));
856 		for_each_pipe(dev_priv, pipe)
857 			seq_printf(m, "Pipe %c stat:         %08x\n",
858 				   pipe_name(pipe),
859 				   I915_READ(PIPESTAT(pipe)));
860 	} else {
861 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
862 			   I915_READ(DEIER));
863 		seq_printf(m, "North Display Interrupt identity:	%08x\n",
864 			   I915_READ(DEIIR));
865 		seq_printf(m, "North Display Interrupt mask:		%08x\n",
866 			   I915_READ(DEIMR));
867 		seq_printf(m, "South Display Interrupt enable:		%08x\n",
868 			   I915_READ(SDEIER));
869 		seq_printf(m, "South Display Interrupt identity:	%08x\n",
870 			   I915_READ(SDEIIR));
871 		seq_printf(m, "South Display Interrupt mask:		%08x\n",
872 			   I915_READ(SDEIMR));
873 		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
874 			   I915_READ(GTIER));
875 		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
876 			   I915_READ(GTIIR));
877 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
878 			   I915_READ(GTIMR));
879 	}
880 	for_each_ring(ring, dev_priv, i) {
881 		if (INTEL_INFO(dev)->gen >= 6) {
882 			seq_printf(m,
883 				   "Graphics Interrupt mask (%s):	%08x\n",
884 				   ring->name, I915_READ_IMR(ring));
885 		}
886 		i915_ring_seqno_info(m, ring);
887 	}
888 	intel_runtime_pm_put(dev_priv);
889 	mutex_unlock(&dev->struct_mutex);
890 
891 	return 0;
892 }
893 
894 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
895 {
896 	struct drm_info_node *node = m->private;
897 	struct drm_device *dev = node->minor->dev;
898 	struct drm_i915_private *dev_priv = dev->dev_private;
899 	int i, ret;
900 
901 	ret = mutex_lock_interruptible(&dev->struct_mutex);
902 	if (ret)
903 		return ret;
904 
905 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
906 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
907 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
908 		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
909 
910 		seq_printf(m, "Fence %d, pin count = %d, object = ",
911 			   i, dev_priv->fence_regs[i].pin_count);
912 		if (obj == NULL)
913 			seq_puts(m, "unused");
914 		else
915 			describe_obj(m, obj);
916 		seq_putc(m, '\n');
917 	}
918 
919 	mutex_unlock(&dev->struct_mutex);
920 	return 0;
921 }
922 
923 static int i915_hws_info(struct seq_file *m, void *data)
924 {
925 	struct drm_info_node *node = m->private;
926 	struct drm_device *dev = node->minor->dev;
927 	struct drm_i915_private *dev_priv = dev->dev_private;
928 	struct intel_engine_cs *ring;
929 	const u32 *hws;
930 	int i;
931 
932 	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
933 	hws = ring->status_page.page_addr;
934 	if (hws == NULL)
935 		return 0;
936 
937 	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
938 		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
939 			   i * 4,
940 			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
941 	}
942 	return 0;
943 }
944 
945 static ssize_t
946 i915_error_state_write(struct file *filp,
947 		       const char __user *ubuf,
948 		       size_t cnt,
949 		       loff_t *ppos)
950 {
951 	struct i915_error_state_file_priv *error_priv = filp->private_data;
952 	struct drm_device *dev = error_priv->dev;
953 	int ret;
954 
955 	DRM_DEBUG_DRIVER("Resetting error state\n");
956 
957 	ret = mutex_lock_interruptible(&dev->struct_mutex);
958 	if (ret)
959 		return ret;
960 
961 	i915_destroy_error_state(dev);
962 	mutex_unlock(&dev->struct_mutex);
963 
964 	return cnt;
965 }
966 
967 static int i915_error_state_open(struct inode *inode, struct file *file)
968 {
969 	struct drm_device *dev = inode->i_private;
970 	struct i915_error_state_file_priv *error_priv;
971 
972 	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
973 	if (!error_priv)
974 		return -ENOMEM;
975 
976 	error_priv->dev = dev;
977 
978 	i915_error_state_get(dev, error_priv);
979 
980 	file->private_data = error_priv;
981 
982 	return 0;
983 }
984 
985 static int i915_error_state_release(struct inode *inode, struct file *file)
986 {
987 	struct i915_error_state_file_priv *error_priv = file->private_data;
988 
989 	i915_error_state_put(error_priv);
990 	kfree(error_priv);
991 
992 	return 0;
993 }
994 
995 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
996 				     size_t count, loff_t *pos)
997 {
998 	struct i915_error_state_file_priv *error_priv = file->private_data;
999 	struct drm_i915_error_state_buf error_str;
1000 	loff_t tmp_pos = 0;
1001 	ssize_t ret_count = 0;
1002 	int ret;
1003 
1004 	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
1005 	if (ret)
1006 		return ret;
1007 
1008 	ret = i915_error_state_to_str(&error_str, error_priv);
1009 	if (ret)
1010 		goto out;
1011 
1012 	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
1013 					    error_str.buf,
1014 					    error_str.bytes);
1015 
1016 	if (ret_count < 0)
1017 		ret = ret_count;
1018 	else
1019 		*pos = error_str.start + ret_count;
1020 out:
1021 	i915_error_state_buf_release(&error_str);
1022 	return ret ?: ret_count;
1023 }
1024 
1025 static const struct file_operations i915_error_state_fops = {
1026 	.owner = THIS_MODULE,
1027 	.open = i915_error_state_open,
1028 	.read = i915_error_state_read,
1029 	.write = i915_error_state_write,
1030 	.llseek = default_llseek,
1031 	.release = i915_error_state_release,
1032 };
1033 
1034 static int
1035 i915_next_seqno_get(void *data, u64 *val)
1036 {
1037 	struct drm_device *dev = data;
1038 	struct drm_i915_private *dev_priv = dev->dev_private;
1039 	int ret;
1040 
1041 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1042 	if (ret)
1043 		return ret;
1044 
1045 	*val = dev_priv->next_seqno;
1046 	mutex_unlock(&dev->struct_mutex);
1047 
1048 	return 0;
1049 }
1050 
1051 static int
1052 i915_next_seqno_set(void *data, u64 val)
1053 {
1054 	struct drm_device *dev = data;
1055 	int ret;
1056 
1057 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1058 	if (ret)
1059 		return ret;
1060 
1061 	ret = i915_gem_set_seqno(dev, val);
1062 	mutex_unlock(&dev->struct_mutex);
1063 
1064 	return ret;
1065 }
1066 
1067 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1068 			i915_next_seqno_get, i915_next_seqno_set,
1069 			"0x%llx\n");
1070 
1071 static int i915_frequency_info(struct seq_file *m, void *unused)
1072 {
1073 	struct drm_info_node *node = m->private;
1074 	struct drm_device *dev = node->minor->dev;
1075 	struct drm_i915_private *dev_priv = dev->dev_private;
1076 	int ret = 0;
1077 
1078 	intel_runtime_pm_get(dev_priv);
1079 
1080 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1081 
1082 	if (IS_GEN5(dev)) {
1083 		u16 rgvswctl = I915_READ16(MEMSWCTL);
1084 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1085 
1086 		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1087 		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1088 		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1089 			   MEMSTAT_VID_SHIFT);
1090 		seq_printf(m, "Current P-state: %d\n",
1091 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1092 	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
1093 		   IS_BROADWELL(dev) || IS_GEN9(dev)) {
1094 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1095 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1096 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1097 		u32 rpmodectl, rpinclimit, rpdeclimit;
1098 		u32 rpstat, cagf, reqf;
1099 		u32 rpupei, rpcurup, rpprevup;
1100 		u32 rpdownei, rpcurdown, rpprevdown;
1101 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1102 		int max_freq;
1103 
1104 		/* RPSTAT1 is in the GT power well */
1105 		ret = mutex_lock_interruptible(&dev->struct_mutex);
1106 		if (ret)
1107 			goto out;
1108 
1109 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1110 
1111 		reqf = I915_READ(GEN6_RPNSWREQ);
1112 		if (IS_GEN9(dev))
1113 			reqf >>= 23;
1114 		else {
1115 			reqf &= ~GEN6_TURBO_DISABLE;
1116 			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1117 				reqf >>= 24;
1118 			else
1119 				reqf >>= 25;
1120 		}
1121 		reqf = intel_gpu_freq(dev_priv, reqf);
1122 
1123 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1124 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1125 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1126 
1127 		rpstat = I915_READ(GEN6_RPSTAT1);
1128 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
1129 		rpcurup = I915_READ(GEN6_RP_CUR_UP);
1130 		rpprevup = I915_READ(GEN6_RP_PREV_UP);
1131 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
1132 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
1133 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
1134 		if (IS_GEN9(dev))
1135 			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
1136 		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1137 			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1138 		else
1139 			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
1140 		cagf = intel_gpu_freq(dev_priv, cagf);
1141 
1142 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1143 		mutex_unlock(&dev->struct_mutex);
1144 
1145 		if (IS_GEN6(dev) || IS_GEN7(dev)) {
1146 			pm_ier = I915_READ(GEN6_PMIER);
1147 			pm_imr = I915_READ(GEN6_PMIMR);
1148 			pm_isr = I915_READ(GEN6_PMISR);
1149 			pm_iir = I915_READ(GEN6_PMIIR);
1150 			pm_mask = I915_READ(GEN6_PMINTRMSK);
1151 		} else {
1152 			pm_ier = I915_READ(GEN8_GT_IER(2));
1153 			pm_imr = I915_READ(GEN8_GT_IMR(2));
1154 			pm_isr = I915_READ(GEN8_GT_ISR(2));
1155 			pm_iir = I915_READ(GEN8_GT_IIR(2));
1156 			pm_mask = I915_READ(GEN6_PMINTRMSK);
1157 		}
1158 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1159 			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1160 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1161 		seq_printf(m, "Render p-state ratio: %d\n",
1162 			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
1163 		seq_printf(m, "Render p-state VID: %d\n",
1164 			   gt_perf_status & 0xff);
1165 		seq_printf(m, "Render p-state limit: %d\n",
1166 			   rp_state_limits & 0xff);
1167 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1168 		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1169 		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1170 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1171 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1172 		seq_printf(m, "CAGF: %dMHz\n", cagf);
1173 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
1174 			   GEN6_CURICONT_MASK);
1175 		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
1176 			   GEN6_CURBSYTAVG_MASK);
1177 		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
1178 			   GEN6_CURBSYTAVG_MASK);
1179 		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
1180 			   GEN6_CURIAVG_MASK);
1181 		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
1182 			   GEN6_CURBSYTAVG_MASK);
1183 		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
1184 			   GEN6_CURBSYTAVG_MASK);
1185 
1186 		max_freq = (rp_state_cap & 0xff0000) >> 16;
1187 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
1188 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1189 			   intel_gpu_freq(dev_priv, max_freq));
1190 
1191 		max_freq = (rp_state_cap & 0xff00) >> 8;
1192 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
1193 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1194 			   intel_gpu_freq(dev_priv, max_freq));
1195 
1196 		max_freq = rp_state_cap & 0xff;
1197 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
1198 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1199 			   intel_gpu_freq(dev_priv, max_freq));
1200 
1201 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1202 			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1203 
1204 		seq_printf(m, "Idle freq: %d MHz\n",
1205 			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1206 	} else if (IS_VALLEYVIEW(dev)) {
1207 		u32 freq_sts;
1208 
1209 		mutex_lock(&dev_priv->rps.hw_lock);
1210 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1211 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1212 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1213 
1214 		seq_printf(m, "max GPU freq: %d MHz\n",
1215 			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1216 
1217 		seq_printf(m, "min GPU freq: %d MHz\n",
1218 			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1219 
1220 		seq_printf(m, "idle GPU freq: %d MHz\n",
1221 			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1222 
1223 		seq_printf(m,
1224 			   "efficient (RPe) frequency: %d MHz\n",
1225 			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1226 
1227 		seq_printf(m, "current GPU freq: %d MHz\n",
1228 			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1229 		mutex_unlock(&dev_priv->rps.hw_lock);
1230 	} else {
1231 		seq_puts(m, "no P-state info available\n");
1232 	}
1233 
1234 out:
1235 	intel_runtime_pm_put(dev_priv);
1236 	return ret;
1237 }
1238 
1239 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1240 {
1241 	struct drm_info_node *node = m->private;
1242 	struct drm_device *dev = node->minor->dev;
1243 	struct drm_i915_private *dev_priv = dev->dev_private;
1244 	struct intel_engine_cs *ring;
1245 	u64 acthd[I915_NUM_RINGS];
1246 	u32 seqno[I915_NUM_RINGS];
1247 	int i;
1248 
1249 	if (!i915.enable_hangcheck) {
1250 		seq_printf(m, "Hangcheck disabled\n");
1251 		return 0;
1252 	}
1253 
1254 	intel_runtime_pm_get(dev_priv);
1255 
1256 	for_each_ring(ring, dev_priv, i) {
1257 		seqno[i] = ring->get_seqno(ring, false);
1258 		acthd[i] = intel_ring_get_active_head(ring);
1259 	}
1260 
1261 	intel_runtime_pm_put(dev_priv);
1262 
1263 	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
1264 		seq_printf(m, "Hangcheck active, fires in %dms\n",
1265 			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1266 					    jiffies));
1267 	} else
1268 		seq_printf(m, "Hangcheck inactive\n");
1269 
1270 	for_each_ring(ring, dev_priv, i) {
1271 		seq_printf(m, "%s:\n", ring->name);
1272 		seq_printf(m, "\tseqno = %x [current %x]\n",
1273 			   ring->hangcheck.seqno, seqno[i]);
1274 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1275 			   (long long)ring->hangcheck.acthd,
1276 			   (long long)acthd[i]);
1277 		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
1278 			   (long long)ring->hangcheck.max_acthd);
1279 		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
1280 		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
1281 	}
1282 
1283 	return 0;
1284 }
1285 
1286 static int ironlake_drpc_info(struct seq_file *m)
1287 {
1288 	struct drm_info_node *node = m->private;
1289 	struct drm_device *dev = node->minor->dev;
1290 	struct drm_i915_private *dev_priv = dev->dev_private;
1291 	u32 rgvmodectl, rstdbyctl;
1292 	u16 crstandvid;
1293 	int ret;
1294 
1295 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1296 	if (ret)
1297 		return ret;
1298 	intel_runtime_pm_get(dev_priv);
1299 
1300 	rgvmodectl = I915_READ(MEMMODECTL);
1301 	rstdbyctl = I915_READ(RSTDBYCTL);
1302 	crstandvid = I915_READ16(CRSTANDVID);
1303 
1304 	intel_runtime_pm_put(dev_priv);
1305 	mutex_unlock(&dev->struct_mutex);
1306 
1307 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
1308 		   "yes" : "no");
1309 	seq_printf(m, "Boost freq: %d\n",
1310 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1311 		   MEMMODE_BOOST_FREQ_SHIFT);
1312 	seq_printf(m, "HW control enabled: %s\n",
1313 		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
1314 	seq_printf(m, "SW control enabled: %s\n",
1315 		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
1316 	seq_printf(m, "Gated voltage change: %s\n",
1317 		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
1318 	seq_printf(m, "Starting frequency: P%d\n",
1319 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1320 	seq_printf(m, "Max P-state: P%d\n",
1321 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1322 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1323 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1324 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1325 	seq_printf(m, "Render standby enabled: %s\n",
1326 		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
1327 	seq_puts(m, "Current RS state: ");
1328 	switch (rstdbyctl & RSX_STATUS_MASK) {
1329 	case RSX_STATUS_ON:
1330 		seq_puts(m, "on\n");
1331 		break;
1332 	case RSX_STATUS_RC1:
1333 		seq_puts(m, "RC1\n");
1334 		break;
1335 	case RSX_STATUS_RC1E:
1336 		seq_puts(m, "RC1E\n");
1337 		break;
1338 	case RSX_STATUS_RS1:
1339 		seq_puts(m, "RS1\n");
1340 		break;
1341 	case RSX_STATUS_RS2:
1342 		seq_puts(m, "RS2 (RC6)\n");
1343 		break;
1344 	case RSX_STATUS_RS3:
1345 		seq_puts(m, "RC3 (RC6+)\n");
1346 		break;
1347 	default:
1348 		seq_puts(m, "unknown\n");
1349 		break;
1350 	}
1351 
1352 	return 0;
1353 }
1354 
1355 static int i915_forcewake_domains(struct seq_file *m, void *data)
1356 {
1357 	struct drm_info_node *node = m->private;
1358 	struct drm_device *dev = node->minor->dev;
1359 	struct drm_i915_private *dev_priv = dev->dev_private;
1360 	struct intel_uncore_forcewake_domain *fw_domain;
1361 	int i;
1362 
1363 	spin_lock_irq(&dev_priv->uncore.lock);
1364 	for_each_fw_domain(fw_domain, dev_priv, i) {
1365 		seq_printf(m, "%s.wake_count = %u\n",
1366 			   intel_uncore_forcewake_domain_to_str(i),
1367 			   fw_domain->wake_count);
1368 	}
1369 	spin_unlock_irq(&dev_priv->uncore.lock);
1370 
1371 	return 0;
1372 }
1373 
1374 static int vlv_drpc_info(struct seq_file *m)
1375 {
1376 	struct drm_info_node *node = m->private;
1377 	struct drm_device *dev = node->minor->dev;
1378 	struct drm_i915_private *dev_priv = dev->dev_private;
1379 	u32 rpmodectl1, rcctl1, pw_status;
1380 
1381 	intel_runtime_pm_get(dev_priv);
1382 
1383 	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1384 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1385 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1386 
1387 	intel_runtime_pm_put(dev_priv);
1388 
1389 	seq_printf(m, "Video Turbo Mode: %s\n",
1390 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1391 	seq_printf(m, "Turbo enabled: %s\n",
1392 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1393 	seq_printf(m, "HW control enabled: %s\n",
1394 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1395 	seq_printf(m, "SW control enabled: %s\n",
1396 		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1397 			  GEN6_RP_MEDIA_SW_MODE));
1398 	seq_printf(m, "RC6 Enabled: %s\n",
1399 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1400 					GEN6_RC_CTL_EI_MODE(1))));
1401 	seq_printf(m, "Render Power Well: %s\n",
1402 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1403 	seq_printf(m, "Media Power Well: %s\n",
1404 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1405 
1406 	seq_printf(m, "Render RC6 residency since boot: %u\n",
1407 		   I915_READ(VLV_GT_RENDER_RC6));
1408 	seq_printf(m, "Media RC6 residency since boot: %u\n",
1409 		   I915_READ(VLV_GT_MEDIA_RC6));
1410 
1411 	return i915_forcewake_domains(m, NULL);
1412 }
1413 
1414 static int gen6_drpc_info(struct seq_file *m)
1415 {
1416 	struct drm_info_node *node = m->private;
1417 	struct drm_device *dev = node->minor->dev;
1418 	struct drm_i915_private *dev_priv = dev->dev_private;
1419 	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1420 	unsigned forcewake_count;
1421 	int count = 0, ret;
1422 
1423 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1424 	if (ret)
1425 		return ret;
1426 	intel_runtime_pm_get(dev_priv);
1427 
1428 	spin_lock_irq(&dev_priv->uncore.lock);
1429 	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
1430 	spin_unlock_irq(&dev_priv->uncore.lock);
1431 
1432 	if (forcewake_count) {
1433 		seq_puts(m, "RC information inaccurate because somebody "
1434 			    "holds a forcewake reference \n");
1435 	} else {
1436 		/* NB: we cannot use forcewake, else we read the wrong values */
1437 		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1438 			udelay(10);
1439 		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1440 	}
1441 
1442 	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1443 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1444 
1445 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1446 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1447 	mutex_unlock(&dev->struct_mutex);
1448 	mutex_lock(&dev_priv->rps.hw_lock);
1449 	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1450 	mutex_unlock(&dev_priv->rps.hw_lock);
1451 
1452 	intel_runtime_pm_put(dev_priv);
1453 
1454 	seq_printf(m, "Video Turbo Mode: %s\n",
1455 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1456 	seq_printf(m, "HW control enabled: %s\n",
1457 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1458 	seq_printf(m, "SW control enabled: %s\n",
1459 		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1460 			  GEN6_RP_MEDIA_SW_MODE));
1461 	seq_printf(m, "RC1e Enabled: %s\n",
1462 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1463 	seq_printf(m, "RC6 Enabled: %s\n",
1464 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1465 	seq_printf(m, "Deep RC6 Enabled: %s\n",
1466 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1467 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1468 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1469 	seq_puts(m, "Current RC state: ");
1470 	switch (gt_core_status & GEN6_RCn_MASK) {
1471 	case GEN6_RC0:
1472 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1473 			seq_puts(m, "Core Power Down\n");
1474 		else
1475 			seq_puts(m, "on\n");
1476 		break;
1477 	case GEN6_RC3:
1478 		seq_puts(m, "RC3\n");
1479 		break;
1480 	case GEN6_RC6:
1481 		seq_puts(m, "RC6\n");
1482 		break;
1483 	case GEN6_RC7:
1484 		seq_puts(m, "RC7\n");
1485 		break;
1486 	default:
1487 		seq_puts(m, "Unknown\n");
1488 		break;
1489 	}
1490 
1491 	seq_printf(m, "Core Power Down: %s\n",
1492 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1493 
1494 	/* Not exactly sure what this is */
1495 	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1496 		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1497 	seq_printf(m, "RC6 residency since boot: %u\n",
1498 		   I915_READ(GEN6_GT_GFX_RC6));
1499 	seq_printf(m, "RC6+ residency since boot: %u\n",
1500 		   I915_READ(GEN6_GT_GFX_RC6p));
1501 	seq_printf(m, "RC6++ residency since boot: %u\n",
1502 		   I915_READ(GEN6_GT_GFX_RC6pp));
1503 
1504 	seq_printf(m, "RC6   voltage: %dmV\n",
1505 		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1506 	seq_printf(m, "RC6+  voltage: %dmV\n",
1507 		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1508 	seq_printf(m, "RC6++ voltage: %dmV\n",
1509 		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1510 	return 0;
1511 }
1512 
1513 static int i915_drpc_info(struct seq_file *m, void *unused)
1514 {
1515 	struct drm_info_node *node = m->private;
1516 	struct drm_device *dev = node->minor->dev;
1517 
1518 	if (IS_VALLEYVIEW(dev))
1519 		return vlv_drpc_info(m);
1520 	else if (INTEL_INFO(dev)->gen >= 6)
1521 		return gen6_drpc_info(m);
1522 	else
1523 		return ironlake_drpc_info(m);
1524 }
1525 
1526 static int i915_fbc_status(struct seq_file *m, void *unused)
1527 {
1528 	struct drm_info_node *node = m->private;
1529 	struct drm_device *dev = node->minor->dev;
1530 	struct drm_i915_private *dev_priv = dev->dev_private;
1531 
1532 	if (!HAS_FBC(dev)) {
1533 		seq_puts(m, "FBC unsupported on this chipset\n");
1534 		return 0;
1535 	}
1536 
1537 	intel_runtime_pm_get(dev_priv);
1538 
1539 	if (intel_fbc_enabled(dev)) {
1540 		seq_puts(m, "FBC enabled\n");
1541 	} else {
1542 		seq_puts(m, "FBC disabled: ");
1543 		switch (dev_priv->fbc.no_fbc_reason) {
1544 		case FBC_OK:
1545 			seq_puts(m, "FBC actived, but currently disabled in hardware");
1546 			break;
1547 		case FBC_UNSUPPORTED:
1548 			seq_puts(m, "unsupported by this chipset");
1549 			break;
1550 		case FBC_NO_OUTPUT:
1551 			seq_puts(m, "no outputs");
1552 			break;
1553 		case FBC_STOLEN_TOO_SMALL:
1554 			seq_puts(m, "not enough stolen memory");
1555 			break;
1556 		case FBC_UNSUPPORTED_MODE:
1557 			seq_puts(m, "mode not supported");
1558 			break;
1559 		case FBC_MODE_TOO_LARGE:
1560 			seq_puts(m, "mode too large");
1561 			break;
1562 		case FBC_BAD_PLANE:
1563 			seq_puts(m, "FBC unsupported on plane");
1564 			break;
1565 		case FBC_NOT_TILED:
1566 			seq_puts(m, "scanout buffer not tiled");
1567 			break;
1568 		case FBC_MULTIPLE_PIPES:
1569 			seq_puts(m, "multiple pipes are enabled");
1570 			break;
1571 		case FBC_MODULE_PARAM:
1572 			seq_puts(m, "disabled per module param (default off)");
1573 			break;
1574 		case FBC_CHIP_DEFAULT:
1575 			seq_puts(m, "disabled per chip default");
1576 			break;
1577 		default:
1578 			seq_puts(m, "unknown reason");
1579 		}
1580 		seq_putc(m, '\n');
1581 	}
1582 
1583 	intel_runtime_pm_put(dev_priv);
1584 
1585 	return 0;
1586 }
1587 
1588 static int i915_fbc_fc_get(void *data, u64 *val)
1589 {
1590 	struct drm_device *dev = data;
1591 	struct drm_i915_private *dev_priv = dev->dev_private;
1592 
1593 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1594 		return -ENODEV;
1595 
1596 	drm_modeset_lock_all(dev);
1597 	*val = dev_priv->fbc.false_color;
1598 	drm_modeset_unlock_all(dev);
1599 
1600 	return 0;
1601 }
1602 
1603 static int i915_fbc_fc_set(void *data, u64 val)
1604 {
1605 	struct drm_device *dev = data;
1606 	struct drm_i915_private *dev_priv = dev->dev_private;
1607 	u32 reg;
1608 
1609 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1610 		return -ENODEV;
1611 
1612 	drm_modeset_lock_all(dev);
1613 
1614 	reg = I915_READ(ILK_DPFC_CONTROL);
1615 	dev_priv->fbc.false_color = val;
1616 
1617 	I915_WRITE(ILK_DPFC_CONTROL, val ?
1618 		   (reg | FBC_CTL_FALSE_COLOR) :
1619 		   (reg & ~FBC_CTL_FALSE_COLOR));
1620 
1621 	drm_modeset_unlock_all(dev);
1622 	return 0;
1623 }
1624 
1625 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1626 			i915_fbc_fc_get, i915_fbc_fc_set,
1627 			"%llu\n");
1628 
1629 static int i915_ips_status(struct seq_file *m, void *unused)
1630 {
1631 	struct drm_info_node *node = m->private;
1632 	struct drm_device *dev = node->minor->dev;
1633 	struct drm_i915_private *dev_priv = dev->dev_private;
1634 
1635 	if (!HAS_IPS(dev)) {
1636 		seq_puts(m, "not supported\n");
1637 		return 0;
1638 	}
1639 
1640 	intel_runtime_pm_get(dev_priv);
1641 
1642 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1643 		   yesno(i915.enable_ips));
1644 
1645 	if (INTEL_INFO(dev)->gen >= 8) {
1646 		seq_puts(m, "Currently: unknown\n");
1647 	} else {
1648 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1649 			seq_puts(m, "Currently: enabled\n");
1650 		else
1651 			seq_puts(m, "Currently: disabled\n");
1652 	}
1653 
1654 	intel_runtime_pm_put(dev_priv);
1655 
1656 	return 0;
1657 }
1658 
1659 static int i915_sr_status(struct seq_file *m, void *unused)
1660 {
1661 	struct drm_info_node *node = m->private;
1662 	struct drm_device *dev = node->minor->dev;
1663 	struct drm_i915_private *dev_priv = dev->dev_private;
1664 	bool sr_enabled = false;
1665 
1666 	intel_runtime_pm_get(dev_priv);
1667 
1668 	if (HAS_PCH_SPLIT(dev))
1669 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1670 	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
1671 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1672 	else if (IS_I915GM(dev))
1673 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1674 	else if (IS_PINEVIEW(dev))
1675 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1676 
1677 	intel_runtime_pm_put(dev_priv);
1678 
1679 	seq_printf(m, "self-refresh: %s\n",
1680 		   sr_enabled ? "enabled" : "disabled");
1681 
1682 	return 0;
1683 }
1684 
1685 static int i915_emon_status(struct seq_file *m, void *unused)
1686 {
1687 	struct drm_info_node *node = m->private;
1688 	struct drm_device *dev = node->minor->dev;
1689 	struct drm_i915_private *dev_priv = dev->dev_private;
1690 	unsigned long temp, chipset, gfx;
1691 	int ret;
1692 
1693 	if (!IS_GEN5(dev))
1694 		return -ENODEV;
1695 
1696 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1697 	if (ret)
1698 		return ret;
1699 
1700 	temp = i915_mch_val(dev_priv);
1701 	chipset = i915_chipset_val(dev_priv);
1702 	gfx = i915_gfx_val(dev_priv);
1703 	mutex_unlock(&dev->struct_mutex);
1704 
1705 	seq_printf(m, "GMCH temp: %ld\n", temp);
1706 	seq_printf(m, "Chipset power: %ld\n", chipset);
1707 	seq_printf(m, "GFX power: %ld\n", gfx);
1708 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1709 
1710 	return 0;
1711 }
1712 
1713 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1714 {
1715 	struct drm_info_node *node = m->private;
1716 	struct drm_device *dev = node->minor->dev;
1717 	struct drm_i915_private *dev_priv = dev->dev_private;
1718 	int ret = 0;
1719 	int gpu_freq, ia_freq;
1720 
1721 	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1722 		seq_puts(m, "unsupported on this chipset\n");
1723 		return 0;
1724 	}
1725 
1726 	intel_runtime_pm_get(dev_priv);
1727 
1728 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1729 
1730 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1731 	if (ret)
1732 		goto out;
1733 
1734 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1735 
1736 	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
1737 	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
1738 	     gpu_freq++) {
1739 		ia_freq = gpu_freq;
1740 		sandybridge_pcode_read(dev_priv,
1741 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1742 				       &ia_freq);
1743 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1744 			   intel_gpu_freq(dev_priv, gpu_freq),
1745 			   ((ia_freq >> 0) & 0xff) * 100,
1746 			   ((ia_freq >> 8) & 0xff) * 100);
1747 	}
1748 
1749 	mutex_unlock(&dev_priv->rps.hw_lock);
1750 
1751 out:
1752 	intel_runtime_pm_put(dev_priv);
1753 	return ret;
1754 }
1755 
1756 static int i915_opregion(struct seq_file *m, void *unused)
1757 {
1758 	struct drm_info_node *node = m->private;
1759 	struct drm_device *dev = node->minor->dev;
1760 	struct drm_i915_private *dev_priv = dev->dev_private;
1761 	struct intel_opregion *opregion = &dev_priv->opregion;
1762 	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1763 	int ret;
1764 
1765 	if (data == NULL)
1766 		return -ENOMEM;
1767 
1768 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1769 	if (ret)
1770 		goto out;
1771 
1772 	if (opregion->header) {
1773 		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1774 		seq_write(m, data, OPREGION_SIZE);
1775 	}
1776 
1777 	mutex_unlock(&dev->struct_mutex);
1778 
1779 out:
1780 	kfree(data);
1781 	return 0;
1782 }
1783 
1784 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1785 {
1786 	struct drm_info_node *node = m->private;
1787 	struct drm_device *dev = node->minor->dev;
1788 	struct intel_fbdev *ifbdev = NULL;
1789 	struct intel_framebuffer *fb;
1790 
1791 #ifdef CONFIG_DRM_I915_FBDEV
1792 	struct drm_i915_private *dev_priv = dev->dev_private;
1793 
1794 	ifbdev = dev_priv->fbdev;
1795 	fb = to_intel_framebuffer(ifbdev->helper.fb);
1796 
1797 	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1798 		   fb->base.width,
1799 		   fb->base.height,
1800 		   fb->base.depth,
1801 		   fb->base.bits_per_pixel,
1802 		   fb->base.modifier[0],
1803 		   atomic_read(&fb->base.refcount.refcount));
1804 	describe_obj(m, fb->obj);
1805 	seq_putc(m, '\n');
1806 #endif
1807 
1808 	mutex_lock(&dev->mode_config.fb_lock);
1809 	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1810 		if (ifbdev && &fb->base == ifbdev->helper.fb)
1811 			continue;
1812 
1813 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1814 			   fb->base.width,
1815 			   fb->base.height,
1816 			   fb->base.depth,
1817 			   fb->base.bits_per_pixel,
1818 			   fb->base.modifier[0],
1819 			   atomic_read(&fb->base.refcount.refcount));
1820 		describe_obj(m, fb->obj);
1821 		seq_putc(m, '\n');
1822 	}
1823 	mutex_unlock(&dev->mode_config.fb_lock);
1824 
1825 	return 0;
1826 }
1827 
1828 static void describe_ctx_ringbuf(struct seq_file *m,
1829 				 struct intel_ringbuffer *ringbuf)
1830 {
1831 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
1832 		   ringbuf->space, ringbuf->head, ringbuf->tail,
1833 		   ringbuf->last_retired_head);
1834 }
1835 
1836 static int i915_context_status(struct seq_file *m, void *unused)
1837 {
1838 	struct drm_info_node *node = m->private;
1839 	struct drm_device *dev = node->minor->dev;
1840 	struct drm_i915_private *dev_priv = dev->dev_private;
1841 	struct intel_engine_cs *ring;
1842 	struct intel_context *ctx;
1843 	int ret, i;
1844 
1845 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1846 	if (ret)
1847 		return ret;
1848 
1849 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
1850 		if (!i915.enable_execlists &&
1851 		    ctx->legacy_hw_ctx.rcs_state == NULL)
1852 			continue;
1853 
1854 		seq_puts(m, "HW context ");
1855 		describe_ctx(m, ctx);
1856 		for_each_ring(ring, dev_priv, i) {
1857 			if (ring->default_context == ctx)
1858 				seq_printf(m, "(default context %s) ",
1859 					   ring->name);
1860 		}
1861 
1862 		if (i915.enable_execlists) {
1863 			seq_putc(m, '\n');
1864 			for_each_ring(ring, dev_priv, i) {
1865 				struct drm_i915_gem_object *ctx_obj =
1866 					ctx->engine[i].state;
1867 				struct intel_ringbuffer *ringbuf =
1868 					ctx->engine[i].ringbuf;
1869 
1870 				seq_printf(m, "%s: ", ring->name);
1871 				if (ctx_obj)
1872 					describe_obj(m, ctx_obj);
1873 				if (ringbuf)
1874 					describe_ctx_ringbuf(m, ringbuf);
1875 				seq_putc(m, '\n');
1876 			}
1877 		} else {
1878 			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1879 		}
1880 
1881 		seq_putc(m, '\n');
1882 	}
1883 
1884 	mutex_unlock(&dev->struct_mutex);
1885 
1886 	return 0;
1887 }
1888 
1889 static void i915_dump_lrc_obj(struct seq_file *m,
1890 			      struct intel_engine_cs *ring,
1891 			      struct drm_i915_gem_object *ctx_obj)
1892 {
1893 	struct page *page;
1894 	uint32_t *reg_state;
1895 	int j;
1896 	unsigned long ggtt_offset = 0;
1897 
1898 	if (ctx_obj == NULL) {
1899 		seq_printf(m, "Context on %s with no gem object\n",
1900 			   ring->name);
1901 		return;
1902 	}
1903 
1904 	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
1905 		   intel_execlists_ctx_id(ctx_obj));
1906 
1907 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
1908 		seq_puts(m, "\tNot bound in GGTT\n");
1909 	else
1910 		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
1911 
1912 	if (i915_gem_object_get_pages(ctx_obj)) {
1913 		seq_puts(m, "\tFailed to get pages for context object\n");
1914 		return;
1915 	}
1916 
1917 	page = i915_gem_object_get_page(ctx_obj, 1);
1918 	if (!WARN_ON(page == NULL)) {
1919 		reg_state = kmap_atomic(page);
1920 
1921 		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
1922 			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
1923 				   ggtt_offset + 4096 + (j * 4),
1924 				   reg_state[j], reg_state[j + 1],
1925 				   reg_state[j + 2], reg_state[j + 3]);
1926 		}
1927 		kunmap_atomic(reg_state);
1928 	}
1929 
1930 	seq_putc(m, '\n');
1931 }
1932 
1933 static int i915_dump_lrc(struct seq_file *m, void *unused)
1934 {
1935 	struct drm_info_node *node = (struct drm_info_node *) m->private;
1936 	struct drm_device *dev = node->minor->dev;
1937 	struct drm_i915_private *dev_priv = dev->dev_private;
1938 	struct intel_engine_cs *ring;
1939 	struct intel_context *ctx;
1940 	int ret, i;
1941 
1942 	if (!i915.enable_execlists) {
1943 		seq_printf(m, "Logical Ring Contexts are disabled\n");
1944 		return 0;
1945 	}
1946 
1947 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1948 	if (ret)
1949 		return ret;
1950 
1951 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
1952 		for_each_ring(ring, dev_priv, i) {
1953 			if (ring->default_context != ctx)
1954 				i915_dump_lrc_obj(m, ring,
1955 						  ctx->engine[i].state);
1956 		}
1957 	}
1958 
1959 	mutex_unlock(&dev->struct_mutex);
1960 
1961 	return 0;
1962 }
1963 
1964 static int i915_execlists(struct seq_file *m, void *data)
1965 {
1966 	struct drm_info_node *node = (struct drm_info_node *)m->private;
1967 	struct drm_device *dev = node->minor->dev;
1968 	struct drm_i915_private *dev_priv = dev->dev_private;
1969 	struct intel_engine_cs *ring;
1970 	u32 status_pointer;
1971 	u8 read_pointer;
1972 	u8 write_pointer;
1973 	u32 status;
1974 	u32 ctx_id;
1975 	struct list_head *cursor;
1976 	int ring_id, i;
1977 	int ret;
1978 
1979 	if (!i915.enable_execlists) {
1980 		seq_puts(m, "Logical Ring Contexts are disabled\n");
1981 		return 0;
1982 	}
1983 
1984 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1985 	if (ret)
1986 		return ret;
1987 
1988 	intel_runtime_pm_get(dev_priv);
1989 
1990 	for_each_ring(ring, dev_priv, ring_id) {
1991 		struct drm_i915_gem_request *head_req = NULL;
1992 		int count = 0;
1993 		unsigned long flags;
1994 
1995 		seq_printf(m, "%s\n", ring->name);
1996 
1997 		status = I915_READ(RING_EXECLIST_STATUS(ring));
1998 		ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
1999 		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
2000 			   status, ctx_id);
2001 
2002 		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
2003 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
2004 
2005 		read_pointer = ring->next_context_status_buffer;
2006 		write_pointer = status_pointer & 0x07;
2007 		if (read_pointer > write_pointer)
2008 			write_pointer += 6;
2009 		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
2010 			   read_pointer, write_pointer);
2011 
2012 		for (i = 0; i < 6; i++) {
2013 			status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
2014 			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
2015 
2016 			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
2017 				   i, status, ctx_id);
2018 		}
2019 
2020 		spin_lock_irqsave(&ring->execlist_lock, flags);
2021 		list_for_each(cursor, &ring->execlist_queue)
2022 			count++;
2023 		head_req = list_first_entry_or_null(&ring->execlist_queue,
2024 				struct drm_i915_gem_request, execlist_link);
2025 		spin_unlock_irqrestore(&ring->execlist_lock, flags);
2026 
2027 		seq_printf(m, "\t%d requests in queue\n", count);
2028 		if (head_req) {
2029 			struct drm_i915_gem_object *ctx_obj;
2030 
2031 			ctx_obj = head_req->ctx->engine[ring_id].state;
2032 			seq_printf(m, "\tHead request id: %u\n",
2033 				   intel_execlists_ctx_id(ctx_obj));
2034 			seq_printf(m, "\tHead request tail: %u\n",
2035 				   head_req->tail);
2036 		}
2037 
2038 		seq_putc(m, '\n');
2039 	}
2040 
2041 	intel_runtime_pm_put(dev_priv);
2042 	mutex_unlock(&dev->struct_mutex);
2043 
2044 	return 0;
2045 }
2046 
2047 static const char *swizzle_string(unsigned swizzle)
2048 {
2049 	switch (swizzle) {
2050 	case I915_BIT_6_SWIZZLE_NONE:
2051 		return "none";
2052 	case I915_BIT_6_SWIZZLE_9:
2053 		return "bit9";
2054 	case I915_BIT_6_SWIZZLE_9_10:
2055 		return "bit9/bit10";
2056 	case I915_BIT_6_SWIZZLE_9_11:
2057 		return "bit9/bit11";
2058 	case I915_BIT_6_SWIZZLE_9_10_11:
2059 		return "bit9/bit10/bit11";
2060 	case I915_BIT_6_SWIZZLE_9_17:
2061 		return "bit9/bit17";
2062 	case I915_BIT_6_SWIZZLE_9_10_17:
2063 		return "bit9/bit10/bit17";
2064 	case I915_BIT_6_SWIZZLE_UNKNOWN:
2065 		return "unknown";
2066 	}
2067 
2068 	return "bug";
2069 }
2070 
2071 static int i915_swizzle_info(struct seq_file *m, void *data)
2072 {
2073 	struct drm_info_node *node = m->private;
2074 	struct drm_device *dev = node->minor->dev;
2075 	struct drm_i915_private *dev_priv = dev->dev_private;
2076 	int ret;
2077 
2078 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2079 	if (ret)
2080 		return ret;
2081 	intel_runtime_pm_get(dev_priv);
2082 
2083 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2084 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2085 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2086 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2087 
2088 	if (IS_GEN3(dev) || IS_GEN4(dev)) {
2089 		seq_printf(m, "DDC = 0x%08x\n",
2090 			   I915_READ(DCC));
2091 		seq_printf(m, "DDC2 = 0x%08x\n",
2092 			   I915_READ(DCC2));
2093 		seq_printf(m, "C0DRB3 = 0x%04x\n",
2094 			   I915_READ16(C0DRB3));
2095 		seq_printf(m, "C1DRB3 = 0x%04x\n",
2096 			   I915_READ16(C1DRB3));
2097 	} else if (INTEL_INFO(dev)->gen >= 6) {
2098 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2099 			   I915_READ(MAD_DIMM_C0));
2100 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2101 			   I915_READ(MAD_DIMM_C1));
2102 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2103 			   I915_READ(MAD_DIMM_C2));
2104 		seq_printf(m, "TILECTL = 0x%08x\n",
2105 			   I915_READ(TILECTL));
2106 		if (INTEL_INFO(dev)->gen >= 8)
2107 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2108 				   I915_READ(GAMTARBMODE));
2109 		else
2110 			seq_printf(m, "ARB_MODE = 0x%08x\n",
2111 				   I915_READ(ARB_MODE));
2112 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2113 			   I915_READ(DISP_ARB_CTL));
2114 	}
2115 
2116 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2117 		seq_puts(m, "L-shaped memory detected\n");
2118 
2119 	intel_runtime_pm_put(dev_priv);
2120 	mutex_unlock(&dev->struct_mutex);
2121 
2122 	return 0;
2123 }
2124 
2125 static int per_file_ctx(int id, void *ptr, void *data)
2126 {
2127 	struct intel_context *ctx = ptr;
2128 	struct seq_file *m = data;
2129 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2130 
2131 	if (!ppgtt) {
2132 		seq_printf(m, "  no ppgtt for context %d\n",
2133 			   ctx->user_handle);
2134 		return 0;
2135 	}
2136 
2137 	if (i915_gem_context_is_default(ctx))
2138 		seq_puts(m, "  default context:\n");
2139 	else
2140 		seq_printf(m, "  context %d:\n", ctx->user_handle);
2141 	ppgtt->debug_dump(ppgtt, m);
2142 
2143 	return 0;
2144 }
2145 
2146 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2147 {
2148 	struct drm_i915_private *dev_priv = dev->dev_private;
2149 	struct intel_engine_cs *ring;
2150 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2151 	int unused, i;
2152 
2153 	if (!ppgtt)
2154 		return;
2155 
2156 	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
2157 	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
2158 	for_each_ring(ring, dev_priv, unused) {
2159 		seq_printf(m, "%s\n", ring->name);
2160 		for (i = 0; i < 4; i++) {
2161 			u32 offset = 0x270 + i * 8;
2162 			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
2163 			pdp <<= 32;
2164 			pdp |= I915_READ(ring->mmio_base + offset);
2165 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2166 		}
2167 	}
2168 }
2169 
2170 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2171 {
2172 	struct drm_i915_private *dev_priv = dev->dev_private;
2173 	struct intel_engine_cs *ring;
2174 	struct drm_file *file;
2175 	int i;
2176 
2177 	if (INTEL_INFO(dev)->gen == 6)
2178 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2179 
2180 	for_each_ring(ring, dev_priv, i) {
2181 		seq_printf(m, "%s\n", ring->name);
2182 		if (INTEL_INFO(dev)->gen == 7)
2183 			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
2184 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
2185 		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
2186 		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
2187 	}
2188 	if (dev_priv->mm.aliasing_ppgtt) {
2189 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2190 
2191 		seq_puts(m, "aliasing PPGTT:\n");
2192 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.pd_offset);
2193 
2194 		ppgtt->debug_dump(ppgtt, m);
2195 	}
2196 
2197 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2198 		struct drm_i915_file_private *file_priv = file->driver_priv;
2199 
2200 		seq_printf(m, "proc: %s\n",
2201 			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
2202 		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
2203 	}
2204 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2205 }
2206 
2207 static int i915_ppgtt_info(struct seq_file *m, void *data)
2208 {
2209 	struct drm_info_node *node = m->private;
2210 	struct drm_device *dev = node->minor->dev;
2211 	struct drm_i915_private *dev_priv = dev->dev_private;
2212 
2213 	int ret = mutex_lock_interruptible(&dev->struct_mutex);
2214 	if (ret)
2215 		return ret;
2216 	intel_runtime_pm_get(dev_priv);
2217 
2218 	if (INTEL_INFO(dev)->gen >= 8)
2219 		gen8_ppgtt_info(m, dev);
2220 	else if (INTEL_INFO(dev)->gen >= 6)
2221 		gen6_ppgtt_info(m, dev);
2222 
2223 	intel_runtime_pm_put(dev_priv);
2224 	mutex_unlock(&dev->struct_mutex);
2225 
2226 	return 0;
2227 }
2228 
2229 static int i915_llc(struct seq_file *m, void *data)
2230 {
2231 	struct drm_info_node *node = m->private;
2232 	struct drm_device *dev = node->minor->dev;
2233 	struct drm_i915_private *dev_priv = dev->dev_private;
2234 
2235 	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
2236 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
2237 	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
2238 
2239 	return 0;
2240 }
2241 
2242 static int i915_edp_psr_status(struct seq_file *m, void *data)
2243 {
2244 	struct drm_info_node *node = m->private;
2245 	struct drm_device *dev = node->minor->dev;
2246 	struct drm_i915_private *dev_priv = dev->dev_private;
2247 	u32 psrperf = 0;
2248 	u32 stat[3];
2249 	enum pipe pipe;
2250 	bool enabled = false;
2251 
2252 	if (!HAS_PSR(dev)) {
2253 		seq_puts(m, "PSR not supported\n");
2254 		return 0;
2255 	}
2256 
2257 	intel_runtime_pm_get(dev_priv);
2258 
2259 	mutex_lock(&dev_priv->psr.lock);
2260 	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2261 	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2262 	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2263 	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2264 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2265 		   dev_priv->psr.busy_frontbuffer_bits);
2266 	seq_printf(m, "Re-enable work scheduled: %s\n",
2267 		   yesno(work_busy(&dev_priv->psr.work.work)));
2268 
2269 	if (HAS_DDI(dev))
2270 		enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
2271 	else {
2272 		for_each_pipe(dev_priv, pipe) {
2273 			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2274 				VLV_EDP_PSR_CURR_STATE_MASK;
2275 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2276 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2277 				enabled = true;
2278 		}
2279 	}
2280 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2281 
2282 	if (!HAS_DDI(dev))
2283 		for_each_pipe(dev_priv, pipe) {
2284 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2285 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2286 				seq_printf(m, " pipe %c", pipe_name(pipe));
2287 		}
2288 	seq_puts(m, "\n");
2289 
2290 	seq_printf(m, "Link standby: %s\n",
2291 		   yesno((bool)dev_priv->psr.link_standby));
2292 
2293 	/* CHV PSR has no kind of performance counter */
2294 	if (HAS_DDI(dev)) {
2295 		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
2296 			EDP_PSR_PERF_CNT_MASK;
2297 
2298 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2299 	}
2300 	mutex_unlock(&dev_priv->psr.lock);
2301 
2302 	intel_runtime_pm_put(dev_priv);
2303 	return 0;
2304 }
2305 
2306 static int i915_sink_crc(struct seq_file *m, void *data)
2307 {
2308 	struct drm_info_node *node = m->private;
2309 	struct drm_device *dev = node->minor->dev;
2310 	struct intel_encoder *encoder;
2311 	struct intel_connector *connector;
2312 	struct intel_dp *intel_dp = NULL;
2313 	int ret;
2314 	u8 crc[6];
2315 
2316 	drm_modeset_lock_all(dev);
2317 	for_each_intel_connector(dev, connector) {
2318 
2319 		if (connector->base.dpms != DRM_MODE_DPMS_ON)
2320 			continue;
2321 
2322 		if (!connector->base.encoder)
2323 			continue;
2324 
2325 		encoder = to_intel_encoder(connector->base.encoder);
2326 		if (encoder->type != INTEL_OUTPUT_EDP)
2327 			continue;
2328 
2329 		intel_dp = enc_to_intel_dp(&encoder->base);
2330 
2331 		ret = intel_dp_sink_crc(intel_dp, crc);
2332 		if (ret)
2333 			goto out;
2334 
2335 		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2336 			   crc[0], crc[1], crc[2],
2337 			   crc[3], crc[4], crc[5]);
2338 		goto out;
2339 	}
2340 	ret = -ENODEV;
2341 out:
2342 	drm_modeset_unlock_all(dev);
2343 	return ret;
2344 }
2345 
2346 static int i915_energy_uJ(struct seq_file *m, void *data)
2347 {
2348 	struct drm_info_node *node = m->private;
2349 	struct drm_device *dev = node->minor->dev;
2350 	struct drm_i915_private *dev_priv = dev->dev_private;
2351 	u64 power;
2352 	u32 units;
2353 
2354 	if (INTEL_INFO(dev)->gen < 6)
2355 		return -ENODEV;
2356 
2357 	intel_runtime_pm_get(dev_priv);
2358 
2359 	rdmsrl(MSR_RAPL_POWER_UNIT, power);
2360 	power = (power & 0x1f00) >> 8;
2361 	units = 1000000 / (1 << power); /* convert to uJ */
2362 	power = I915_READ(MCH_SECP_NRG_STTS);
2363 	power *= units;
2364 
2365 	intel_runtime_pm_put(dev_priv);
2366 
2367 	seq_printf(m, "%llu", (long long unsigned)power);
2368 
2369 	return 0;
2370 }
2371 
2372 static int i915_pc8_status(struct seq_file *m, void *unused)
2373 {
2374 	struct drm_info_node *node = m->private;
2375 	struct drm_device *dev = node->minor->dev;
2376 	struct drm_i915_private *dev_priv = dev->dev_private;
2377 
2378 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2379 		seq_puts(m, "not supported\n");
2380 		return 0;
2381 	}
2382 
2383 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
2384 	seq_printf(m, "IRQs disabled: %s\n",
2385 		   yesno(!intel_irqs_enabled(dev_priv)));
2386 
2387 	return 0;
2388 }
2389 
2390 static const char *power_domain_str(enum intel_display_power_domain domain)
2391 {
2392 	switch (domain) {
2393 	case POWER_DOMAIN_PIPE_A:
2394 		return "PIPE_A";
2395 	case POWER_DOMAIN_PIPE_B:
2396 		return "PIPE_B";
2397 	case POWER_DOMAIN_PIPE_C:
2398 		return "PIPE_C";
2399 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
2400 		return "PIPE_A_PANEL_FITTER";
2401 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
2402 		return "PIPE_B_PANEL_FITTER";
2403 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
2404 		return "PIPE_C_PANEL_FITTER";
2405 	case POWER_DOMAIN_TRANSCODER_A:
2406 		return "TRANSCODER_A";
2407 	case POWER_DOMAIN_TRANSCODER_B:
2408 		return "TRANSCODER_B";
2409 	case POWER_DOMAIN_TRANSCODER_C:
2410 		return "TRANSCODER_C";
2411 	case POWER_DOMAIN_TRANSCODER_EDP:
2412 		return "TRANSCODER_EDP";
2413 	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
2414 		return "PORT_DDI_A_2_LANES";
2415 	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
2416 		return "PORT_DDI_A_4_LANES";
2417 	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
2418 		return "PORT_DDI_B_2_LANES";
2419 	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
2420 		return "PORT_DDI_B_4_LANES";
2421 	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
2422 		return "PORT_DDI_C_2_LANES";
2423 	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
2424 		return "PORT_DDI_C_4_LANES";
2425 	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
2426 		return "PORT_DDI_D_2_LANES";
2427 	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
2428 		return "PORT_DDI_D_4_LANES";
2429 	case POWER_DOMAIN_PORT_DSI:
2430 		return "PORT_DSI";
2431 	case POWER_DOMAIN_PORT_CRT:
2432 		return "PORT_CRT";
2433 	case POWER_DOMAIN_PORT_OTHER:
2434 		return "PORT_OTHER";
2435 	case POWER_DOMAIN_VGA:
2436 		return "VGA";
2437 	case POWER_DOMAIN_AUDIO:
2438 		return "AUDIO";
2439 	case POWER_DOMAIN_PLLS:
2440 		return "PLLS";
2441 	case POWER_DOMAIN_AUX_A:
2442 		return "AUX_A";
2443 	case POWER_DOMAIN_AUX_B:
2444 		return "AUX_B";
2445 	case POWER_DOMAIN_AUX_C:
2446 		return "AUX_C";
2447 	case POWER_DOMAIN_AUX_D:
2448 		return "AUX_D";
2449 	case POWER_DOMAIN_INIT:
2450 		return "INIT";
2451 	default:
2452 		MISSING_CASE(domain);
2453 		return "?";
2454 	}
2455 }
2456 
2457 static int i915_power_domain_info(struct seq_file *m, void *unused)
2458 {
2459 	struct drm_info_node *node = m->private;
2460 	struct drm_device *dev = node->minor->dev;
2461 	struct drm_i915_private *dev_priv = dev->dev_private;
2462 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2463 	int i;
2464 
2465 	mutex_lock(&power_domains->lock);
2466 
2467 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2468 	for (i = 0; i < power_domains->power_well_count; i++) {
2469 		struct i915_power_well *power_well;
2470 		enum intel_display_power_domain power_domain;
2471 
2472 		power_well = &power_domains->power_wells[i];
2473 		seq_printf(m, "%-25s %d\n", power_well->name,
2474 			   power_well->count);
2475 
2476 		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2477 		     power_domain++) {
2478 			if (!(BIT(power_domain) & power_well->domains))
2479 				continue;
2480 
2481 			seq_printf(m, "  %-23s %d\n",
2482 				 power_domain_str(power_domain),
2483 				 power_domains->domain_use_count[power_domain]);
2484 		}
2485 	}
2486 
2487 	mutex_unlock(&power_domains->lock);
2488 
2489 	return 0;
2490 }
2491 
2492 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2493 				 struct drm_display_mode *mode)
2494 {
2495 	int i;
2496 
2497 	for (i = 0; i < tabs; i++)
2498 		seq_putc(m, '\t');
2499 
2500 	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2501 		   mode->base.id, mode->name,
2502 		   mode->vrefresh, mode->clock,
2503 		   mode->hdisplay, mode->hsync_start,
2504 		   mode->hsync_end, mode->htotal,
2505 		   mode->vdisplay, mode->vsync_start,
2506 		   mode->vsync_end, mode->vtotal,
2507 		   mode->type, mode->flags);
2508 }
2509 
2510 static void intel_encoder_info(struct seq_file *m,
2511 			       struct intel_crtc *intel_crtc,
2512 			       struct intel_encoder *intel_encoder)
2513 {
2514 	struct drm_info_node *node = m->private;
2515 	struct drm_device *dev = node->minor->dev;
2516 	struct drm_crtc *crtc = &intel_crtc->base;
2517 	struct intel_connector *intel_connector;
2518 	struct drm_encoder *encoder;
2519 
2520 	encoder = &intel_encoder->base;
2521 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2522 		   encoder->base.id, encoder->name);
2523 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2524 		struct drm_connector *connector = &intel_connector->base;
2525 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2526 			   connector->base.id,
2527 			   connector->name,
2528 			   drm_get_connector_status_name(connector->status));
2529 		if (connector->status == connector_status_connected) {
2530 			struct drm_display_mode *mode = &crtc->mode;
2531 			seq_printf(m, ", mode:\n");
2532 			intel_seq_print_mode(m, 2, mode);
2533 		} else {
2534 			seq_putc(m, '\n');
2535 		}
2536 	}
2537 }
2538 
2539 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2540 {
2541 	struct drm_info_node *node = m->private;
2542 	struct drm_device *dev = node->minor->dev;
2543 	struct drm_crtc *crtc = &intel_crtc->base;
2544 	struct intel_encoder *intel_encoder;
2545 
2546 	if (crtc->primary->fb)
2547 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2548 			   crtc->primary->fb->base.id, crtc->x, crtc->y,
2549 			   crtc->primary->fb->width, crtc->primary->fb->height);
2550 	else
2551 		seq_puts(m, "\tprimary plane disabled\n");
2552 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2553 		intel_encoder_info(m, intel_crtc, intel_encoder);
2554 }
2555 
2556 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2557 {
2558 	struct drm_display_mode *mode = panel->fixed_mode;
2559 
2560 	seq_printf(m, "\tfixed mode:\n");
2561 	intel_seq_print_mode(m, 2, mode);
2562 }
2563 
2564 static void intel_dp_info(struct seq_file *m,
2565 			  struct intel_connector *intel_connector)
2566 {
2567 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2568 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2569 
2570 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2571 	seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
2572 		   "no");
2573 	if (intel_encoder->type == INTEL_OUTPUT_EDP)
2574 		intel_panel_info(m, &intel_connector->panel);
2575 }
2576 
2577 static void intel_hdmi_info(struct seq_file *m,
2578 			    struct intel_connector *intel_connector)
2579 {
2580 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2581 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2582 
2583 	seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
2584 		   "no");
2585 }
2586 
2587 static void intel_lvds_info(struct seq_file *m,
2588 			    struct intel_connector *intel_connector)
2589 {
2590 	intel_panel_info(m, &intel_connector->panel);
2591 }
2592 
2593 static void intel_connector_info(struct seq_file *m,
2594 				 struct drm_connector *connector)
2595 {
2596 	struct intel_connector *intel_connector = to_intel_connector(connector);
2597 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2598 	struct drm_display_mode *mode;
2599 
2600 	seq_printf(m, "connector %d: type %s, status: %s\n",
2601 		   connector->base.id, connector->name,
2602 		   drm_get_connector_status_name(connector->status));
2603 	if (connector->status == connector_status_connected) {
2604 		seq_printf(m, "\tname: %s\n", connector->display_info.name);
2605 		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2606 			   connector->display_info.width_mm,
2607 			   connector->display_info.height_mm);
2608 		seq_printf(m, "\tsubpixel order: %s\n",
2609 			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2610 		seq_printf(m, "\tCEA rev: %d\n",
2611 			   connector->display_info.cea_rev);
2612 	}
2613 	if (intel_encoder) {
2614 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2615 		    intel_encoder->type == INTEL_OUTPUT_EDP)
2616 			intel_dp_info(m, intel_connector);
2617 		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2618 			intel_hdmi_info(m, intel_connector);
2619 		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2620 			intel_lvds_info(m, intel_connector);
2621 	}
2622 
2623 	seq_printf(m, "\tmodes:\n");
2624 	list_for_each_entry(mode, &connector->modes, head)
2625 		intel_seq_print_mode(m, 2, mode);
2626 }
2627 
2628 static bool cursor_active(struct drm_device *dev, int pipe)
2629 {
2630 	struct drm_i915_private *dev_priv = dev->dev_private;
2631 	u32 state;
2632 
2633 	if (IS_845G(dev) || IS_I865G(dev))
2634 		state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
2635 	else
2636 		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2637 
2638 	return state;
2639 }
2640 
2641 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2642 {
2643 	struct drm_i915_private *dev_priv = dev->dev_private;
2644 	u32 pos;
2645 
2646 	pos = I915_READ(CURPOS(pipe));
2647 
2648 	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2649 	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2650 		*x = -*x;
2651 
2652 	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2653 	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2654 		*y = -*y;
2655 
2656 	return cursor_active(dev, pipe);
2657 }
2658 
2659 static int i915_display_info(struct seq_file *m, void *unused)
2660 {
2661 	struct drm_info_node *node = m->private;
2662 	struct drm_device *dev = node->minor->dev;
2663 	struct drm_i915_private *dev_priv = dev->dev_private;
2664 	struct intel_crtc *crtc;
2665 	struct drm_connector *connector;
2666 
2667 	intel_runtime_pm_get(dev_priv);
2668 	drm_modeset_lock_all(dev);
2669 	seq_printf(m, "CRTC info\n");
2670 	seq_printf(m, "---------\n");
2671 	for_each_intel_crtc(dev, crtc) {
2672 		bool active;
2673 		int x, y;
2674 
2675 		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
2676 			   crtc->base.base.id, pipe_name(crtc->pipe),
2677 			   yesno(crtc->active), crtc->config->pipe_src_w,
2678 			   crtc->config->pipe_src_h);
2679 		if (crtc->active) {
2680 			intel_crtc_info(m, crtc);
2681 
2682 			active = cursor_position(dev, crtc->pipe, &x, &y);
2683 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
2684 				   yesno(crtc->cursor_base),
2685 				   x, y, crtc->base.cursor->state->crtc_w,
2686 				   crtc->base.cursor->state->crtc_h,
2687 				   crtc->cursor_addr, yesno(active));
2688 		}
2689 
2690 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2691 			   yesno(!crtc->cpu_fifo_underrun_disabled),
2692 			   yesno(!crtc->pch_fifo_underrun_disabled));
2693 	}
2694 
2695 	seq_printf(m, "\n");
2696 	seq_printf(m, "Connector info\n");
2697 	seq_printf(m, "--------------\n");
2698 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2699 		intel_connector_info(m, connector);
2700 	}
2701 	drm_modeset_unlock_all(dev);
2702 	intel_runtime_pm_put(dev_priv);
2703 
2704 	return 0;
2705 }
2706 
2707 static int i915_semaphore_status(struct seq_file *m, void *unused)
2708 {
2709 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2710 	struct drm_device *dev = node->minor->dev;
2711 	struct drm_i915_private *dev_priv = dev->dev_private;
2712 	struct intel_engine_cs *ring;
2713 	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
2714 	int i, j, ret;
2715 
2716 	if (!i915_semaphore_is_enabled(dev)) {
2717 		seq_puts(m, "Semaphores are disabled\n");
2718 		return 0;
2719 	}
2720 
2721 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2722 	if (ret)
2723 		return ret;
2724 	intel_runtime_pm_get(dev_priv);
2725 
2726 	if (IS_BROADWELL(dev)) {
2727 		struct page *page;
2728 		uint64_t *seqno;
2729 
2730 		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
2731 
2732 		seqno = (uint64_t *)kmap_atomic(page);
2733 		for_each_ring(ring, dev_priv, i) {
2734 			uint64_t offset;
2735 
2736 			seq_printf(m, "%s\n", ring->name);
2737 
2738 			seq_puts(m, "  Last signal:");
2739 			for (j = 0; j < num_rings; j++) {
2740 				offset = i * I915_NUM_RINGS + j;
2741 				seq_printf(m, "0x%08llx (0x%02llx) ",
2742 					   seqno[offset], offset * 8);
2743 			}
2744 			seq_putc(m, '\n');
2745 
2746 			seq_puts(m, "  Last wait:  ");
2747 			for (j = 0; j < num_rings; j++) {
2748 				offset = i + (j * I915_NUM_RINGS);
2749 				seq_printf(m, "0x%08llx (0x%02llx) ",
2750 					   seqno[offset], offset * 8);
2751 			}
2752 			seq_putc(m, '\n');
2753 
2754 		}
2755 		kunmap_atomic(seqno);
2756 	} else {
2757 		seq_puts(m, "  Last signal:");
2758 		for_each_ring(ring, dev_priv, i)
2759 			for (j = 0; j < num_rings; j++)
2760 				seq_printf(m, "0x%08x\n",
2761 					   I915_READ(ring->semaphore.mbox.signal[j]));
2762 		seq_putc(m, '\n');
2763 	}
2764 
2765 	seq_puts(m, "\nSync seqno:\n");
2766 	for_each_ring(ring, dev_priv, i) {
2767 		for (j = 0; j < num_rings; j++) {
2768 			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
2769 		}
2770 		seq_putc(m, '\n');
2771 	}
2772 	seq_putc(m, '\n');
2773 
2774 	intel_runtime_pm_put(dev_priv);
2775 	mutex_unlock(&dev->struct_mutex);
2776 	return 0;
2777 }
2778 
2779 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2780 {
2781 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2782 	struct drm_device *dev = node->minor->dev;
2783 	struct drm_i915_private *dev_priv = dev->dev_private;
2784 	int i;
2785 
2786 	drm_modeset_lock_all(dev);
2787 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2788 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2789 
2790 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
2791 		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
2792 			   pll->config.crtc_mask, pll->active, yesno(pll->on));
2793 		seq_printf(m, " tracked hardware state:\n");
2794 		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
2795 		seq_printf(m, " dpll_md: 0x%08x\n",
2796 			   pll->config.hw_state.dpll_md);
2797 		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
2798 		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
2799 		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
2800 	}
2801 	drm_modeset_unlock_all(dev);
2802 
2803 	return 0;
2804 }
2805 
2806 static int i915_wa_registers(struct seq_file *m, void *unused)
2807 {
2808 	int i;
2809 	int ret;
2810 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2811 	struct drm_device *dev = node->minor->dev;
2812 	struct drm_i915_private *dev_priv = dev->dev_private;
2813 
2814 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2815 	if (ret)
2816 		return ret;
2817 
2818 	intel_runtime_pm_get(dev_priv);
2819 
2820 	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
2821 	for (i = 0; i < dev_priv->workarounds.count; ++i) {
2822 		u32 addr, mask, value, read;
2823 		bool ok;
2824 
2825 		addr = dev_priv->workarounds.reg[i].addr;
2826 		mask = dev_priv->workarounds.reg[i].mask;
2827 		value = dev_priv->workarounds.reg[i].value;
2828 		read = I915_READ(addr);
2829 		ok = (value & mask) == (read & mask);
2830 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
2831 			   addr, value, mask, read, ok ? "OK" : "FAIL");
2832 	}
2833 
2834 	intel_runtime_pm_put(dev_priv);
2835 	mutex_unlock(&dev->struct_mutex);
2836 
2837 	return 0;
2838 }
2839 
2840 static int i915_ddb_info(struct seq_file *m, void *unused)
2841 {
2842 	struct drm_info_node *node = m->private;
2843 	struct drm_device *dev = node->minor->dev;
2844 	struct drm_i915_private *dev_priv = dev->dev_private;
2845 	struct skl_ddb_allocation *ddb;
2846 	struct skl_ddb_entry *entry;
2847 	enum pipe pipe;
2848 	int plane;
2849 
2850 	if (INTEL_INFO(dev)->gen < 9)
2851 		return 0;
2852 
2853 	drm_modeset_lock_all(dev);
2854 
2855 	ddb = &dev_priv->wm.skl_hw.ddb;
2856 
2857 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2858 
2859 	for_each_pipe(dev_priv, pipe) {
2860 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2861 
2862 		for_each_plane(dev_priv, pipe, plane) {
2863 			entry = &ddb->plane[pipe][plane];
2864 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
2865 				   entry->start, entry->end,
2866 				   skl_ddb_entry_size(entry));
2867 		}
2868 
2869 		entry = &ddb->cursor[pipe];
2870 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
2871 			   entry->end, skl_ddb_entry_size(entry));
2872 	}
2873 
2874 	drm_modeset_unlock_all(dev);
2875 
2876 	return 0;
2877 }
2878 
2879 static void drrs_status_per_crtc(struct seq_file *m,
2880 		struct drm_device *dev, struct intel_crtc *intel_crtc)
2881 {
2882 	struct intel_encoder *intel_encoder;
2883 	struct drm_i915_private *dev_priv = dev->dev_private;
2884 	struct i915_drrs *drrs = &dev_priv->drrs;
2885 	int vrefresh = 0;
2886 
2887 	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
2888 		/* Encoder connected on this CRTC */
2889 		switch (intel_encoder->type) {
2890 		case INTEL_OUTPUT_EDP:
2891 			seq_puts(m, "eDP:\n");
2892 			break;
2893 		case INTEL_OUTPUT_DSI:
2894 			seq_puts(m, "DSI:\n");
2895 			break;
2896 		case INTEL_OUTPUT_HDMI:
2897 			seq_puts(m, "HDMI:\n");
2898 			break;
2899 		case INTEL_OUTPUT_DISPLAYPORT:
2900 			seq_puts(m, "DP:\n");
2901 			break;
2902 		default:
2903 			seq_printf(m, "Other encoder (id=%d).\n",
2904 						intel_encoder->type);
2905 			return;
2906 		}
2907 	}
2908 
2909 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
2910 		seq_puts(m, "\tVBT: DRRS_type: Static");
2911 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
2912 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
2913 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
2914 		seq_puts(m, "\tVBT: DRRS_type: None");
2915 	else
2916 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
2917 
2918 	seq_puts(m, "\n\n");
2919 
2920 	if (intel_crtc->config->has_drrs) {
2921 		struct intel_panel *panel;
2922 
2923 		mutex_lock(&drrs->mutex);
2924 		/* DRRS Supported */
2925 		seq_puts(m, "\tDRRS Supported: Yes\n");
2926 
2927 		/* disable_drrs() will make drrs->dp NULL */
2928 		if (!drrs->dp) {
2929 			seq_puts(m, "Idleness DRRS: Disabled");
2930 			mutex_unlock(&drrs->mutex);
2931 			return;
2932 		}
2933 
2934 		panel = &drrs->dp->attached_connector->panel;
2935 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
2936 					drrs->busy_frontbuffer_bits);
2937 
2938 		seq_puts(m, "\n\t\t");
2939 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
2940 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
2941 			vrefresh = panel->fixed_mode->vrefresh;
2942 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
2943 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
2944 			vrefresh = panel->downclock_mode->vrefresh;
2945 		} else {
2946 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
2947 						drrs->refresh_rate_type);
2948 			mutex_unlock(&drrs->mutex);
2949 			return;
2950 		}
2951 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
2952 
2953 		seq_puts(m, "\n\t\t");
2954 		mutex_unlock(&drrs->mutex);
2955 	} else {
2956 		/* DRRS not supported. Print the VBT parameter*/
2957 		seq_puts(m, "\tDRRS Supported : No");
2958 	}
2959 	seq_puts(m, "\n");
2960 }
2961 
2962 static int i915_drrs_status(struct seq_file *m, void *unused)
2963 {
2964 	struct drm_info_node *node = m->private;
2965 	struct drm_device *dev = node->minor->dev;
2966 	struct intel_crtc *intel_crtc;
2967 	int active_crtc_cnt = 0;
2968 
2969 	for_each_intel_crtc(dev, intel_crtc) {
2970 		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
2971 
2972 		if (intel_crtc->active) {
2973 			active_crtc_cnt++;
2974 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
2975 
2976 			drrs_status_per_crtc(m, dev, intel_crtc);
2977 		}
2978 
2979 		drm_modeset_unlock(&intel_crtc->base.mutex);
2980 	}
2981 
2982 	if (!active_crtc_cnt)
2983 		seq_puts(m, "No active crtc found\n");
2984 
2985 	return 0;
2986 }
2987 
2988 struct pipe_crc_info {
2989 	const char *name;
2990 	struct drm_device *dev;
2991 	enum pipe pipe;
2992 };
2993 
2994 static int i915_dp_mst_info(struct seq_file *m, void *unused)
2995 {
2996 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2997 	struct drm_device *dev = node->minor->dev;
2998 	struct drm_encoder *encoder;
2999 	struct intel_encoder *intel_encoder;
3000 	struct intel_digital_port *intel_dig_port;
3001 	drm_modeset_lock_all(dev);
3002 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3003 		intel_encoder = to_intel_encoder(encoder);
3004 		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
3005 			continue;
3006 		intel_dig_port = enc_to_dig_port(encoder);
3007 		if (!intel_dig_port->dp.can_mst)
3008 			continue;
3009 
3010 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3011 	}
3012 	drm_modeset_unlock_all(dev);
3013 	return 0;
3014 }
3015 
3016 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
3017 {
3018 	struct pipe_crc_info *info = inode->i_private;
3019 	struct drm_i915_private *dev_priv = info->dev->dev_private;
3020 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3021 
3022 	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
3023 		return -ENODEV;
3024 
3025 	spin_lock_irq(&pipe_crc->lock);
3026 
3027 	if (pipe_crc->opened) {
3028 		spin_unlock_irq(&pipe_crc->lock);
3029 		return -EBUSY; /* already open */
3030 	}
3031 
3032 	pipe_crc->opened = true;
3033 	filep->private_data = inode->i_private;
3034 
3035 	spin_unlock_irq(&pipe_crc->lock);
3036 
3037 	return 0;
3038 }
3039 
3040 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
3041 {
3042 	struct pipe_crc_info *info = inode->i_private;
3043 	struct drm_i915_private *dev_priv = info->dev->dev_private;
3044 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3045 
3046 	spin_lock_irq(&pipe_crc->lock);
3047 	pipe_crc->opened = false;
3048 	spin_unlock_irq(&pipe_crc->lock);
3049 
3050 	return 0;
3051 }
3052 
3053 /* (6 fields, 8 chars each, space separated (5) + '\n') */
3054 #define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
3055 /* account for \'0' */
3056 #define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
3057 
3058 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
3059 {
3060 	assert_spin_locked(&pipe_crc->lock);
3061 	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3062 			INTEL_PIPE_CRC_ENTRIES_NR);
3063 }
3064 
3065 static ssize_t
3066 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
3067 		   loff_t *pos)
3068 {
3069 	struct pipe_crc_info *info = filep->private_data;
3070 	struct drm_device *dev = info->dev;
3071 	struct drm_i915_private *dev_priv = dev->dev_private;
3072 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3073 	char buf[PIPE_CRC_BUFFER_LEN];
3074 	int n_entries;
3075 	ssize_t bytes_read;
3076 
3077 	/*
3078 	 * Don't allow user space to provide buffers not big enough to hold
3079 	 * a line of data.
3080 	 */
3081 	if (count < PIPE_CRC_LINE_LEN)
3082 		return -EINVAL;
3083 
3084 	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
3085 		return 0;
3086 
3087 	/* nothing to read */
3088 	spin_lock_irq(&pipe_crc->lock);
3089 	while (pipe_crc_data_count(pipe_crc) == 0) {
3090 		int ret;
3091 
3092 		if (filep->f_flags & O_NONBLOCK) {
3093 			spin_unlock_irq(&pipe_crc->lock);
3094 			return -EAGAIN;
3095 		}
3096 
3097 		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
3098 				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
3099 		if (ret) {
3100 			spin_unlock_irq(&pipe_crc->lock);
3101 			return ret;
3102 		}
3103 	}
3104 
3105 	/* We now have one or more entries to read */
3106 	n_entries = count / PIPE_CRC_LINE_LEN;
3107 
3108 	bytes_read = 0;
3109 	while (n_entries > 0) {
3110 		struct intel_pipe_crc_entry *entry =
3111 			&pipe_crc->entries[pipe_crc->tail];
3112 		int ret;
3113 
3114 		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3115 			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
3116 			break;
3117 
3118 		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
3119 		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
3120 
3121 		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
3122 				       "%8u %8x %8x %8x %8x %8x\n",
3123 				       entry->frame, entry->crc[0],
3124 				       entry->crc[1], entry->crc[2],
3125 				       entry->crc[3], entry->crc[4]);
3126 
3127 		spin_unlock_irq(&pipe_crc->lock);
3128 
3129 		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
3130 		if (ret == PIPE_CRC_LINE_LEN)
3131 			return -EFAULT;
3132 
3133 		user_buf += PIPE_CRC_LINE_LEN;
3134 		n_entries--;
3135 
3136 		spin_lock_irq(&pipe_crc->lock);
3137 	}
3138 
3139 	spin_unlock_irq(&pipe_crc->lock);
3140 
3141 	return bytes_read;
3142 }
3143 
3144 static const struct file_operations i915_pipe_crc_fops = {
3145 	.owner = THIS_MODULE,
3146 	.open = i915_pipe_crc_open,
3147 	.read = i915_pipe_crc_read,
3148 	.release = i915_pipe_crc_release,
3149 };
3150 
3151 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
3152 	{
3153 		.name = "i915_pipe_A_crc",
3154 		.pipe = PIPE_A,
3155 	},
3156 	{
3157 		.name = "i915_pipe_B_crc",
3158 		.pipe = PIPE_B,
3159 	},
3160 	{
3161 		.name = "i915_pipe_C_crc",
3162 		.pipe = PIPE_C,
3163 	},
3164 };
3165 
3166 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
3167 				enum pipe pipe)
3168 {
3169 	struct drm_device *dev = minor->dev;
3170 	struct dentry *ent;
3171 	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
3172 
3173 	info->dev = dev;
3174 	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
3175 				  &i915_pipe_crc_fops);
3176 	if (!ent)
3177 		return -ENOMEM;
3178 
3179 	return drm_add_fake_info_node(minor, ent, info);
3180 }
3181 
3182 static const char * const pipe_crc_sources[] = {
3183 	"none",
3184 	"plane1",
3185 	"plane2",
3186 	"pf",
3187 	"pipe",
3188 	"TV",
3189 	"DP-B",
3190 	"DP-C",
3191 	"DP-D",
3192 	"auto",
3193 };
3194 
3195 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
3196 {
3197 	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
3198 	return pipe_crc_sources[source];
3199 }
3200 
3201 static int display_crc_ctl_show(struct seq_file *m, void *data)
3202 {
3203 	struct drm_device *dev = m->private;
3204 	struct drm_i915_private *dev_priv = dev->dev_private;
3205 	int i;
3206 
3207 	for (i = 0; i < I915_MAX_PIPES; i++)
3208 		seq_printf(m, "%c %s\n", pipe_name(i),
3209 			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
3210 
3211 	return 0;
3212 }
3213 
3214 static int display_crc_ctl_open(struct inode *inode, struct file *file)
3215 {
3216 	struct drm_device *dev = inode->i_private;
3217 
3218 	return single_open(file, display_crc_ctl_show, dev);
3219 }
3220 
3221 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3222 				 uint32_t *val)
3223 {
3224 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3225 		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3226 
3227 	switch (*source) {
3228 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3229 		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
3230 		break;
3231 	case INTEL_PIPE_CRC_SOURCE_NONE:
3232 		*val = 0;
3233 		break;
3234 	default:
3235 		return -EINVAL;
3236 	}
3237 
3238 	return 0;
3239 }
3240 
3241 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
3242 				     enum intel_pipe_crc_source *source)
3243 {
3244 	struct intel_encoder *encoder;
3245 	struct intel_crtc *crtc;
3246 	struct intel_digital_port *dig_port;
3247 	int ret = 0;
3248 
3249 	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3250 
3251 	drm_modeset_lock_all(dev);
3252 	for_each_intel_encoder(dev, encoder) {
3253 		if (!encoder->base.crtc)
3254 			continue;
3255 
3256 		crtc = to_intel_crtc(encoder->base.crtc);
3257 
3258 		if (crtc->pipe != pipe)
3259 			continue;
3260 
3261 		switch (encoder->type) {
3262 		case INTEL_OUTPUT_TVOUT:
3263 			*source = INTEL_PIPE_CRC_SOURCE_TV;
3264 			break;
3265 		case INTEL_OUTPUT_DISPLAYPORT:
3266 		case INTEL_OUTPUT_EDP:
3267 			dig_port = enc_to_dig_port(&encoder->base);
3268 			switch (dig_port->port) {
3269 			case PORT_B:
3270 				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
3271 				break;
3272 			case PORT_C:
3273 				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
3274 				break;
3275 			case PORT_D:
3276 				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
3277 				break;
3278 			default:
3279 				WARN(1, "nonexisting DP port %c\n",
3280 				     port_name(dig_port->port));
3281 				break;
3282 			}
3283 			break;
3284 		default:
3285 			break;
3286 		}
3287 	}
3288 	drm_modeset_unlock_all(dev);
3289 
3290 	return ret;
3291 }
3292 
3293 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
3294 				enum pipe pipe,
3295 				enum intel_pipe_crc_source *source,
3296 				uint32_t *val)
3297 {
3298 	struct drm_i915_private *dev_priv = dev->dev_private;
3299 	bool need_stable_symbols = false;
3300 
3301 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3302 		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3303 		if (ret)
3304 			return ret;
3305 	}
3306 
3307 	switch (*source) {
3308 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3309 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
3310 		break;
3311 	case INTEL_PIPE_CRC_SOURCE_DP_B:
3312 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
3313 		need_stable_symbols = true;
3314 		break;
3315 	case INTEL_PIPE_CRC_SOURCE_DP_C:
3316 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
3317 		need_stable_symbols = true;
3318 		break;
3319 	case INTEL_PIPE_CRC_SOURCE_DP_D:
3320 		if (!IS_CHERRYVIEW(dev))
3321 			return -EINVAL;
3322 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
3323 		need_stable_symbols = true;
3324 		break;
3325 	case INTEL_PIPE_CRC_SOURCE_NONE:
3326 		*val = 0;
3327 		break;
3328 	default:
3329 		return -EINVAL;
3330 	}
3331 
3332 	/*
3333 	 * When the pipe CRC tap point is after the transcoders we need
3334 	 * to tweak symbol-level features to produce a deterministic series of
3335 	 * symbols for a given frame. We need to reset those features only once
3336 	 * a frame (instead of every nth symbol):
3337 	 *   - DC-balance: used to ensure a better clock recovery from the data
3338 	 *     link (SDVO)
3339 	 *   - DisplayPort scrambling: used for EMI reduction
3340 	 */
3341 	if (need_stable_symbols) {
3342 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3343 
3344 		tmp |= DC_BALANCE_RESET_VLV;
3345 		switch (pipe) {
3346 		case PIPE_A:
3347 			tmp |= PIPE_A_SCRAMBLE_RESET;
3348 			break;
3349 		case PIPE_B:
3350 			tmp |= PIPE_B_SCRAMBLE_RESET;
3351 			break;
3352 		case PIPE_C:
3353 			tmp |= PIPE_C_SCRAMBLE_RESET;
3354 			break;
3355 		default:
3356 			return -EINVAL;
3357 		}
3358 		I915_WRITE(PORT_DFT2_G4X, tmp);
3359 	}
3360 
3361 	return 0;
3362 }
3363 
3364 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
3365 				 enum pipe pipe,
3366 				 enum intel_pipe_crc_source *source,
3367 				 uint32_t *val)
3368 {
3369 	struct drm_i915_private *dev_priv = dev->dev_private;
3370 	bool need_stable_symbols = false;
3371 
3372 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3373 		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3374 		if (ret)
3375 			return ret;
3376 	}
3377 
3378 	switch (*source) {
3379 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3380 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
3381 		break;
3382 	case INTEL_PIPE_CRC_SOURCE_TV:
3383 		if (!SUPPORTS_TV(dev))
3384 			return -EINVAL;
3385 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
3386 		break;
3387 	case INTEL_PIPE_CRC_SOURCE_DP_B:
3388 		if (!IS_G4X(dev))
3389 			return -EINVAL;
3390 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
3391 		need_stable_symbols = true;
3392 		break;
3393 	case INTEL_PIPE_CRC_SOURCE_DP_C:
3394 		if (!IS_G4X(dev))
3395 			return -EINVAL;
3396 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
3397 		need_stable_symbols = true;
3398 		break;
3399 	case INTEL_PIPE_CRC_SOURCE_DP_D:
3400 		if (!IS_G4X(dev))
3401 			return -EINVAL;
3402 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
3403 		need_stable_symbols = true;
3404 		break;
3405 	case INTEL_PIPE_CRC_SOURCE_NONE:
3406 		*val = 0;
3407 		break;
3408 	default:
3409 		return -EINVAL;
3410 	}
3411 
3412 	/*
3413 	 * When the pipe CRC tap point is after the transcoders we need
3414 	 * to tweak symbol-level features to produce a deterministic series of
3415 	 * symbols for a given frame. We need to reset those features only once
3416 	 * a frame (instead of every nth symbol):
3417 	 *   - DC-balance: used to ensure a better clock recovery from the data
3418 	 *     link (SDVO)
3419 	 *   - DisplayPort scrambling: used for EMI reduction
3420 	 */
3421 	if (need_stable_symbols) {
3422 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3423 
3424 		WARN_ON(!IS_G4X(dev));
3425 
3426 		I915_WRITE(PORT_DFT_I9XX,
3427 			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
3428 
3429 		if (pipe == PIPE_A)
3430 			tmp |= PIPE_A_SCRAMBLE_RESET;
3431 		else
3432 			tmp |= PIPE_B_SCRAMBLE_RESET;
3433 
3434 		I915_WRITE(PORT_DFT2_G4X, tmp);
3435 	}
3436 
3437 	return 0;
3438 }
3439 
3440 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3441 					 enum pipe pipe)
3442 {
3443 	struct drm_i915_private *dev_priv = dev->dev_private;
3444 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3445 
3446 	switch (pipe) {
3447 	case PIPE_A:
3448 		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3449 		break;
3450 	case PIPE_B:
3451 		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3452 		break;
3453 	case PIPE_C:
3454 		tmp &= ~PIPE_C_SCRAMBLE_RESET;
3455 		break;
3456 	default:
3457 		return;
3458 	}
3459 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
3460 		tmp &= ~DC_BALANCE_RESET_VLV;
3461 	I915_WRITE(PORT_DFT2_G4X, tmp);
3462 
3463 }
3464 
3465 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
3466 					 enum pipe pipe)
3467 {
3468 	struct drm_i915_private *dev_priv = dev->dev_private;
3469 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3470 
3471 	if (pipe == PIPE_A)
3472 		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3473 	else
3474 		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3475 	I915_WRITE(PORT_DFT2_G4X, tmp);
3476 
3477 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
3478 		I915_WRITE(PORT_DFT_I9XX,
3479 			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
3480 	}
3481 }
3482 
3483 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3484 				uint32_t *val)
3485 {
3486 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3487 		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3488 
3489 	switch (*source) {
3490 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3491 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
3492 		break;
3493 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3494 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
3495 		break;
3496 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3497 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
3498 		break;
3499 	case INTEL_PIPE_CRC_SOURCE_NONE:
3500 		*val = 0;
3501 		break;
3502 	default:
3503 		return -EINVAL;
3504 	}
3505 
3506 	return 0;
3507 }
3508 
3509 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
3510 {
3511 	struct drm_i915_private *dev_priv = dev->dev_private;
3512 	struct intel_crtc *crtc =
3513 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3514 
3515 	drm_modeset_lock_all(dev);
3516 	/*
3517 	 * If we use the eDP transcoder we need to make sure that we don't
3518 	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
3519 	 * relevant on hsw with pipe A when using the always-on power well
3520 	 * routing.
3521 	 */
3522 	if (crtc->config->cpu_transcoder == TRANSCODER_EDP &&
3523 	    !crtc->config->pch_pfit.enabled) {
3524 		crtc->config->pch_pfit.force_thru = true;
3525 
3526 		intel_display_power_get(dev_priv,
3527 					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
3528 
3529 		dev_priv->display.crtc_disable(&crtc->base);
3530 		dev_priv->display.crtc_enable(&crtc->base);
3531 	}
3532 	drm_modeset_unlock_all(dev);
3533 }
3534 
3535 static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
3536 {
3537 	struct drm_i915_private *dev_priv = dev->dev_private;
3538 	struct intel_crtc *crtc =
3539 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3540 
3541 	drm_modeset_lock_all(dev);
3542 	/*
3543 	 * If we use the eDP transcoder we need to make sure that we don't
3544 	 * bypass the pfit, since otherwise the pipe CRC source won't work. Only
3545 	 * relevant on hsw with pipe A when using the always-on power well
3546 	 * routing.
3547 	 */
3548 	if (crtc->config->pch_pfit.force_thru) {
3549 		crtc->config->pch_pfit.force_thru = false;
3550 
3551 		dev_priv->display.crtc_disable(&crtc->base);
3552 		dev_priv->display.crtc_enable(&crtc->base);
3553 
3554 		intel_display_power_put(dev_priv,
3555 					POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
3556 	}
3557 	drm_modeset_unlock_all(dev);
3558 }
3559 
3560 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
3561 				enum pipe pipe,
3562 				enum intel_pipe_crc_source *source,
3563 				uint32_t *val)
3564 {
3565 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3566 		*source = INTEL_PIPE_CRC_SOURCE_PF;
3567 
3568 	switch (*source) {
3569 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3570 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
3571 		break;
3572 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3573 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
3574 		break;
3575 	case INTEL_PIPE_CRC_SOURCE_PF:
3576 		if (IS_HASWELL(dev) && pipe == PIPE_A)
3577 			hsw_trans_edp_pipe_A_crc_wa(dev);
3578 
3579 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
3580 		break;
3581 	case INTEL_PIPE_CRC_SOURCE_NONE:
3582 		*val = 0;
3583 		break;
3584 	default:
3585 		return -EINVAL;
3586 	}
3587 
3588 	return 0;
3589 }
3590 
3591 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3592 			       enum intel_pipe_crc_source source)
3593 {
3594 	struct drm_i915_private *dev_priv = dev->dev_private;
3595 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3596 	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
3597 									pipe));
3598 	u32 val = 0; /* shut up gcc */
3599 	int ret;
3600 
3601 	if (pipe_crc->source == source)
3602 		return 0;
3603 
3604 	/* forbid changing the source without going back to 'none' */
3605 	if (pipe_crc->source && source)
3606 		return -EINVAL;
3607 
3608 	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
3609 		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
3610 		return -EIO;
3611 	}
3612 
3613 	if (IS_GEN2(dev))
3614 		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
3615 	else if (INTEL_INFO(dev)->gen < 5)
3616 		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3617 	else if (IS_VALLEYVIEW(dev))
3618 		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3619 	else if (IS_GEN5(dev) || IS_GEN6(dev))
3620 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
3621 	else
3622 		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3623 
3624 	if (ret != 0)
3625 		return ret;
3626 
3627 	/* none -> real source transition */
3628 	if (source) {
3629 		struct intel_pipe_crc_entry *entries;
3630 
3631 		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
3632 				 pipe_name(pipe), pipe_crc_source_name(source));
3633 
3634 		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
3635 				  sizeof(pipe_crc->entries[0]),
3636 				  GFP_KERNEL);
3637 		if (!entries)
3638 			return -ENOMEM;
3639 
3640 		/*
3641 		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
3642 		 * enabled and disabled dynamically based on package C states,
3643 		 * user space can't make reliable use of the CRCs, so let's just
3644 		 * completely disable it.
3645 		 */
3646 		hsw_disable_ips(crtc);
3647 
3648 		spin_lock_irq(&pipe_crc->lock);
3649 		kfree(pipe_crc->entries);
3650 		pipe_crc->entries = entries;
3651 		pipe_crc->head = 0;
3652 		pipe_crc->tail = 0;
3653 		spin_unlock_irq(&pipe_crc->lock);
3654 	}
3655 
3656 	pipe_crc->source = source;
3657 
3658 	I915_WRITE(PIPE_CRC_CTL(pipe), val);
3659 	POSTING_READ(PIPE_CRC_CTL(pipe));
3660 
3661 	/* real source -> none transition */
3662 	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
3663 		struct intel_pipe_crc_entry *entries;
3664 		struct intel_crtc *crtc =
3665 			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
3666 
3667 		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
3668 				 pipe_name(pipe));
3669 
3670 		drm_modeset_lock(&crtc->base.mutex, NULL);
3671 		if (crtc->active)
3672 			intel_wait_for_vblank(dev, pipe);
3673 		drm_modeset_unlock(&crtc->base.mutex);
3674 
3675 		spin_lock_irq(&pipe_crc->lock);
3676 		entries = pipe_crc->entries;
3677 		pipe_crc->entries = NULL;
3678 		pipe_crc->head = 0;
3679 		pipe_crc->tail = 0;
3680 		spin_unlock_irq(&pipe_crc->lock);
3681 
3682 		kfree(entries);
3683 
3684 		if (IS_G4X(dev))
3685 			g4x_undo_pipe_scramble_reset(dev, pipe);
3686 		else if (IS_VALLEYVIEW(dev))
3687 			vlv_undo_pipe_scramble_reset(dev, pipe);
3688 		else if (IS_HASWELL(dev) && pipe == PIPE_A)
3689 			hsw_undo_trans_edp_pipe_A_crc_wa(dev);
3690 
3691 		hsw_enable_ips(crtc);
3692 	}
3693 
3694 	return 0;
3695 }
3696 
3697 /*
3698  * Parse pipe CRC command strings:
3699  *   command: wsp* object wsp+ name wsp+ source wsp*
3700  *   object: 'pipe'
3701  *   name: (A | B | C)
3702  *   source: (none | plane1 | plane2 | pf)
3703  *   wsp: (#0x20 | #0x9 | #0xA)+
3704  *
3705  * eg.:
3706  *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
3707  *  "pipe A none"    ->  Stop CRC
3708  */
3709 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
3710 {
3711 	int n_words = 0;
3712 
3713 	while (*buf) {
3714 		char *end;
3715 
3716 		/* skip leading white space */
3717 		buf = skip_spaces(buf);
3718 		if (!*buf)
3719 			break;	/* end of buffer */
3720 
3721 		/* find end of word */
3722 		for (end = buf; *end && !isspace(*end); end++)
3723 			;
3724 
3725 		if (n_words == max_words) {
3726 			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
3727 					 max_words);
3728 			return -EINVAL;	/* ran out of words[] before bytes */
3729 		}
3730 
3731 		if (*end)
3732 			*end++ = '\0';
3733 		words[n_words++] = buf;
3734 		buf = end;
3735 	}
3736 
3737 	return n_words;
3738 }
3739 
3740 enum intel_pipe_crc_object {
3741 	PIPE_CRC_OBJECT_PIPE,
3742 };
3743 
3744 static const char * const pipe_crc_objects[] = {
3745 	"pipe",
3746 };
3747 
3748 static int
3749 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
3750 {
3751 	int i;
3752 
3753 	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
3754 		if (!strcmp(buf, pipe_crc_objects[i])) {
3755 			*o = i;
3756 			return 0;
3757 		    }
3758 
3759 	return -EINVAL;
3760 }
3761 
3762 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
3763 {
3764 	const char name = buf[0];
3765 
3766 	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
3767 		return -EINVAL;
3768 
3769 	*pipe = name - 'A';
3770 
3771 	return 0;
3772 }
3773 
3774 static int
3775 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
3776 {
3777 	int i;
3778 
3779 	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
3780 		if (!strcmp(buf, pipe_crc_sources[i])) {
3781 			*s = i;
3782 			return 0;
3783 		    }
3784 
3785 	return -EINVAL;
3786 }
3787 
3788 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
3789 {
3790 #define N_WORDS 3
3791 	int n_words;
3792 	char *words[N_WORDS];
3793 	enum pipe pipe;
3794 	enum intel_pipe_crc_object object;
3795 	enum intel_pipe_crc_source source;
3796 
3797 	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
3798 	if (n_words != N_WORDS) {
3799 		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
3800 				 N_WORDS);
3801 		return -EINVAL;
3802 	}
3803 
3804 	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
3805 		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
3806 		return -EINVAL;
3807 	}
3808 
3809 	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
3810 		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
3811 		return -EINVAL;
3812 	}
3813 
3814 	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
3815 		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
3816 		return -EINVAL;
3817 	}
3818 
3819 	return pipe_crc_set_source(dev, pipe, source);
3820 }
3821 
3822 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
3823 				     size_t len, loff_t *offp)
3824 {
3825 	struct seq_file *m = file->private_data;
3826 	struct drm_device *dev = m->private;
3827 	char *tmpbuf;
3828 	int ret;
3829 
3830 	if (len == 0)
3831 		return 0;
3832 
3833 	if (len > PAGE_SIZE - 1) {
3834 		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
3835 				 PAGE_SIZE);
3836 		return -E2BIG;
3837 	}
3838 
3839 	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
3840 	if (!tmpbuf)
3841 		return -ENOMEM;
3842 
3843 	if (copy_from_user(tmpbuf, ubuf, len)) {
3844 		ret = -EFAULT;
3845 		goto out;
3846 	}
3847 	tmpbuf[len] = '\0';
3848 
3849 	ret = display_crc_ctl_parse(dev, tmpbuf, len);
3850 
3851 out:
3852 	kfree(tmpbuf);
3853 	if (ret < 0)
3854 		return ret;
3855 
3856 	*offp += len;
3857 	return len;
3858 }
3859 
3860 static const struct file_operations i915_display_crc_ctl_fops = {
3861 	.owner = THIS_MODULE,
3862 	.open = display_crc_ctl_open,
3863 	.read = seq_read,
3864 	.llseek = seq_lseek,
3865 	.release = single_release,
3866 	.write = display_crc_ctl_write
3867 };
3868 
3869 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3870 {
3871 	struct drm_device *dev = m->private;
3872 	int num_levels = ilk_wm_max_level(dev) + 1;
3873 	int level;
3874 
3875 	drm_modeset_lock_all(dev);
3876 
3877 	for (level = 0; level < num_levels; level++) {
3878 		unsigned int latency = wm[level];
3879 
3880 		/*
3881 		 * - WM1+ latency values in 0.5us units
3882 		 * - latencies are in us on gen9
3883 		 */
3884 		if (INTEL_INFO(dev)->gen >= 9)
3885 			latency *= 10;
3886 		else if (level > 0)
3887 			latency *= 5;
3888 
3889 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3890 			   level, wm[level], latency / 10, latency % 10);
3891 	}
3892 
3893 	drm_modeset_unlock_all(dev);
3894 }
3895 
3896 static int pri_wm_latency_show(struct seq_file *m, void *data)
3897 {
3898 	struct drm_device *dev = m->private;
3899 	struct drm_i915_private *dev_priv = dev->dev_private;
3900 	const uint16_t *latencies;
3901 
3902 	if (INTEL_INFO(dev)->gen >= 9)
3903 		latencies = dev_priv->wm.skl_latency;
3904 	else
3905 		latencies = to_i915(dev)->wm.pri_latency;
3906 
3907 	wm_latency_show(m, latencies);
3908 
3909 	return 0;
3910 }
3911 
3912 static int spr_wm_latency_show(struct seq_file *m, void *data)
3913 {
3914 	struct drm_device *dev = m->private;
3915 	struct drm_i915_private *dev_priv = dev->dev_private;
3916 	const uint16_t *latencies;
3917 
3918 	if (INTEL_INFO(dev)->gen >= 9)
3919 		latencies = dev_priv->wm.skl_latency;
3920 	else
3921 		latencies = to_i915(dev)->wm.spr_latency;
3922 
3923 	wm_latency_show(m, latencies);
3924 
3925 	return 0;
3926 }
3927 
3928 static int cur_wm_latency_show(struct seq_file *m, void *data)
3929 {
3930 	struct drm_device *dev = m->private;
3931 	struct drm_i915_private *dev_priv = dev->dev_private;
3932 	const uint16_t *latencies;
3933 
3934 	if (INTEL_INFO(dev)->gen >= 9)
3935 		latencies = dev_priv->wm.skl_latency;
3936 	else
3937 		latencies = to_i915(dev)->wm.cur_latency;
3938 
3939 	wm_latency_show(m, latencies);
3940 
3941 	return 0;
3942 }
3943 
3944 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3945 {
3946 	struct drm_device *dev = inode->i_private;
3947 
3948 	if (HAS_GMCH_DISPLAY(dev))
3949 		return -ENODEV;
3950 
3951 	return single_open(file, pri_wm_latency_show, dev);
3952 }
3953 
3954 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3955 {
3956 	struct drm_device *dev = inode->i_private;
3957 
3958 	if (HAS_GMCH_DISPLAY(dev))
3959 		return -ENODEV;
3960 
3961 	return single_open(file, spr_wm_latency_show, dev);
3962 }
3963 
3964 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3965 {
3966 	struct drm_device *dev = inode->i_private;
3967 
3968 	if (HAS_GMCH_DISPLAY(dev))
3969 		return -ENODEV;
3970 
3971 	return single_open(file, cur_wm_latency_show, dev);
3972 }
3973 
3974 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3975 				size_t len, loff_t *offp, uint16_t wm[8])
3976 {
3977 	struct seq_file *m = file->private_data;
3978 	struct drm_device *dev = m->private;
3979 	uint16_t new[8] = { 0 };
3980 	int num_levels = ilk_wm_max_level(dev) + 1;
3981 	int level;
3982 	int ret;
3983 	char tmp[32];
3984 
3985 	if (len >= sizeof(tmp))
3986 		return -EINVAL;
3987 
3988 	if (copy_from_user(tmp, ubuf, len))
3989 		return -EFAULT;
3990 
3991 	tmp[len] = '\0';
3992 
3993 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3994 		     &new[0], &new[1], &new[2], &new[3],
3995 		     &new[4], &new[5], &new[6], &new[7]);
3996 	if (ret != num_levels)
3997 		return -EINVAL;
3998 
3999 	drm_modeset_lock_all(dev);
4000 
4001 	for (level = 0; level < num_levels; level++)
4002 		wm[level] = new[level];
4003 
4004 	drm_modeset_unlock_all(dev);
4005 
4006 	return len;
4007 }
4008 
4009 
4010 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4011 				    size_t len, loff_t *offp)
4012 {
4013 	struct seq_file *m = file->private_data;
4014 	struct drm_device *dev = m->private;
4015 	struct drm_i915_private *dev_priv = dev->dev_private;
4016 	uint16_t *latencies;
4017 
4018 	if (INTEL_INFO(dev)->gen >= 9)
4019 		latencies = dev_priv->wm.skl_latency;
4020 	else
4021 		latencies = to_i915(dev)->wm.pri_latency;
4022 
4023 	return wm_latency_write(file, ubuf, len, offp, latencies);
4024 }
4025 
4026 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4027 				    size_t len, loff_t *offp)
4028 {
4029 	struct seq_file *m = file->private_data;
4030 	struct drm_device *dev = m->private;
4031 	struct drm_i915_private *dev_priv = dev->dev_private;
4032 	uint16_t *latencies;
4033 
4034 	if (INTEL_INFO(dev)->gen >= 9)
4035 		latencies = dev_priv->wm.skl_latency;
4036 	else
4037 		latencies = to_i915(dev)->wm.spr_latency;
4038 
4039 	return wm_latency_write(file, ubuf, len, offp, latencies);
4040 }
4041 
4042 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4043 				    size_t len, loff_t *offp)
4044 {
4045 	struct seq_file *m = file->private_data;
4046 	struct drm_device *dev = m->private;
4047 	struct drm_i915_private *dev_priv = dev->dev_private;
4048 	uint16_t *latencies;
4049 
4050 	if (INTEL_INFO(dev)->gen >= 9)
4051 		latencies = dev_priv->wm.skl_latency;
4052 	else
4053 		latencies = to_i915(dev)->wm.cur_latency;
4054 
4055 	return wm_latency_write(file, ubuf, len, offp, latencies);
4056 }
4057 
4058 static const struct file_operations i915_pri_wm_latency_fops = {
4059 	.owner = THIS_MODULE,
4060 	.open = pri_wm_latency_open,
4061 	.read = seq_read,
4062 	.llseek = seq_lseek,
4063 	.release = single_release,
4064 	.write = pri_wm_latency_write
4065 };
4066 
4067 static const struct file_operations i915_spr_wm_latency_fops = {
4068 	.owner = THIS_MODULE,
4069 	.open = spr_wm_latency_open,
4070 	.read = seq_read,
4071 	.llseek = seq_lseek,
4072 	.release = single_release,
4073 	.write = spr_wm_latency_write
4074 };
4075 
4076 static const struct file_operations i915_cur_wm_latency_fops = {
4077 	.owner = THIS_MODULE,
4078 	.open = cur_wm_latency_open,
4079 	.read = seq_read,
4080 	.llseek = seq_lseek,
4081 	.release = single_release,
4082 	.write = cur_wm_latency_write
4083 };
4084 
4085 static int
4086 i915_wedged_get(void *data, u64 *val)
4087 {
4088 	struct drm_device *dev = data;
4089 	struct drm_i915_private *dev_priv = dev->dev_private;
4090 
4091 	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
4092 
4093 	return 0;
4094 }
4095 
4096 static int
4097 i915_wedged_set(void *data, u64 val)
4098 {
4099 	struct drm_device *dev = data;
4100 	struct drm_i915_private *dev_priv = dev->dev_private;
4101 
4102 	/*
4103 	 * There is no safeguard against this debugfs entry colliding
4104 	 * with the hangcheck calling same i915_handle_error() in
4105 	 * parallel, causing an explosion. For now we assume that the
4106 	 * test harness is responsible enough not to inject gpu hangs
4107 	 * while it is writing to 'i915_wedged'
4108 	 */
4109 
4110 	if (i915_reset_in_progress(&dev_priv->gpu_error))
4111 		return -EAGAIN;
4112 
4113 	intel_runtime_pm_get(dev_priv);
4114 
4115 	i915_handle_error(dev, val,
4116 			  "Manually setting wedged to %llu", val);
4117 
4118 	intel_runtime_pm_put(dev_priv);
4119 
4120 	return 0;
4121 }
4122 
4123 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4124 			i915_wedged_get, i915_wedged_set,
4125 			"%llu\n");
4126 
4127 static int
4128 i915_ring_stop_get(void *data, u64 *val)
4129 {
4130 	struct drm_device *dev = data;
4131 	struct drm_i915_private *dev_priv = dev->dev_private;
4132 
4133 	*val = dev_priv->gpu_error.stop_rings;
4134 
4135 	return 0;
4136 }
4137 
4138 static int
4139 i915_ring_stop_set(void *data, u64 val)
4140 {
4141 	struct drm_device *dev = data;
4142 	struct drm_i915_private *dev_priv = dev->dev_private;
4143 	int ret;
4144 
4145 	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
4146 
4147 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4148 	if (ret)
4149 		return ret;
4150 
4151 	dev_priv->gpu_error.stop_rings = val;
4152 	mutex_unlock(&dev->struct_mutex);
4153 
4154 	return 0;
4155 }
4156 
4157 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
4158 			i915_ring_stop_get, i915_ring_stop_set,
4159 			"0x%08llx\n");
4160 
4161 static int
4162 i915_ring_missed_irq_get(void *data, u64 *val)
4163 {
4164 	struct drm_device *dev = data;
4165 	struct drm_i915_private *dev_priv = dev->dev_private;
4166 
4167 	*val = dev_priv->gpu_error.missed_irq_rings;
4168 	return 0;
4169 }
4170 
4171 static int
4172 i915_ring_missed_irq_set(void *data, u64 val)
4173 {
4174 	struct drm_device *dev = data;
4175 	struct drm_i915_private *dev_priv = dev->dev_private;
4176 	int ret;
4177 
4178 	/* Lock against concurrent debugfs callers */
4179 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4180 	if (ret)
4181 		return ret;
4182 	dev_priv->gpu_error.missed_irq_rings = val;
4183 	mutex_unlock(&dev->struct_mutex);
4184 
4185 	return 0;
4186 }
4187 
4188 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4189 			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4190 			"0x%08llx\n");
4191 
4192 static int
4193 i915_ring_test_irq_get(void *data, u64 *val)
4194 {
4195 	struct drm_device *dev = data;
4196 	struct drm_i915_private *dev_priv = dev->dev_private;
4197 
4198 	*val = dev_priv->gpu_error.test_irq_rings;
4199 
4200 	return 0;
4201 }
4202 
4203 static int
4204 i915_ring_test_irq_set(void *data, u64 val)
4205 {
4206 	struct drm_device *dev = data;
4207 	struct drm_i915_private *dev_priv = dev->dev_private;
4208 	int ret;
4209 
4210 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4211 
4212 	/* Lock against concurrent debugfs callers */
4213 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4214 	if (ret)
4215 		return ret;
4216 
4217 	dev_priv->gpu_error.test_irq_rings = val;
4218 	mutex_unlock(&dev->struct_mutex);
4219 
4220 	return 0;
4221 }
4222 
4223 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4224 			i915_ring_test_irq_get, i915_ring_test_irq_set,
4225 			"0x%08llx\n");
4226 
4227 #define DROP_UNBOUND 0x1
4228 #define DROP_BOUND 0x2
4229 #define DROP_RETIRE 0x4
4230 #define DROP_ACTIVE 0x8
4231 #define DROP_ALL (DROP_UNBOUND | \
4232 		  DROP_BOUND | \
4233 		  DROP_RETIRE | \
4234 		  DROP_ACTIVE)
4235 static int
4236 i915_drop_caches_get(void *data, u64 *val)
4237 {
4238 	*val = DROP_ALL;
4239 
4240 	return 0;
4241 }
4242 
4243 static int
4244 i915_drop_caches_set(void *data, u64 val)
4245 {
4246 	struct drm_device *dev = data;
4247 	struct drm_i915_private *dev_priv = dev->dev_private;
4248 	int ret;
4249 
4250 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
4251 
4252 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
4253 	 * on ioctls on -EAGAIN. */
4254 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4255 	if (ret)
4256 		return ret;
4257 
4258 	if (val & DROP_ACTIVE) {
4259 		ret = i915_gpu_idle(dev);
4260 		if (ret)
4261 			goto unlock;
4262 	}
4263 
4264 	if (val & (DROP_RETIRE | DROP_ACTIVE))
4265 		i915_gem_retire_requests(dev);
4266 
4267 	if (val & DROP_BOUND)
4268 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
4269 
4270 	if (val & DROP_UNBOUND)
4271 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
4272 
4273 unlock:
4274 	mutex_unlock(&dev->struct_mutex);
4275 
4276 	return ret;
4277 }
4278 
4279 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4280 			i915_drop_caches_get, i915_drop_caches_set,
4281 			"0x%08llx\n");
4282 
4283 static int
4284 i915_max_freq_get(void *data, u64 *val)
4285 {
4286 	struct drm_device *dev = data;
4287 	struct drm_i915_private *dev_priv = dev->dev_private;
4288 	int ret;
4289 
4290 	if (INTEL_INFO(dev)->gen < 6)
4291 		return -ENODEV;
4292 
4293 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4294 
4295 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4296 	if (ret)
4297 		return ret;
4298 
4299 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
4300 	mutex_unlock(&dev_priv->rps.hw_lock);
4301 
4302 	return 0;
4303 }
4304 
4305 static int
4306 i915_max_freq_set(void *data, u64 val)
4307 {
4308 	struct drm_device *dev = data;
4309 	struct drm_i915_private *dev_priv = dev->dev_private;
4310 	u32 hw_max, hw_min;
4311 	int ret;
4312 
4313 	if (INTEL_INFO(dev)->gen < 6)
4314 		return -ENODEV;
4315 
4316 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4317 
4318 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
4319 
4320 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4321 	if (ret)
4322 		return ret;
4323 
4324 	/*
4325 	 * Turbo will still be enabled, but won't go above the set value.
4326 	 */
4327 	val = intel_freq_opcode(dev_priv, val);
4328 
4329 	hw_max = dev_priv->rps.max_freq;
4330 	hw_min = dev_priv->rps.min_freq;
4331 
4332 	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
4333 		mutex_unlock(&dev_priv->rps.hw_lock);
4334 		return -EINVAL;
4335 	}
4336 
4337 	dev_priv->rps.max_freq_softlimit = val;
4338 
4339 	intel_set_rps(dev, val);
4340 
4341 	mutex_unlock(&dev_priv->rps.hw_lock);
4342 
4343 	return 0;
4344 }
4345 
4346 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
4347 			i915_max_freq_get, i915_max_freq_set,
4348 			"%llu\n");
4349 
4350 static int
4351 i915_min_freq_get(void *data, u64 *val)
4352 {
4353 	struct drm_device *dev = data;
4354 	struct drm_i915_private *dev_priv = dev->dev_private;
4355 	int ret;
4356 
4357 	if (INTEL_INFO(dev)->gen < 6)
4358 		return -ENODEV;
4359 
4360 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4361 
4362 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4363 	if (ret)
4364 		return ret;
4365 
4366 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
4367 	mutex_unlock(&dev_priv->rps.hw_lock);
4368 
4369 	return 0;
4370 }
4371 
4372 static int
4373 i915_min_freq_set(void *data, u64 val)
4374 {
4375 	struct drm_device *dev = data;
4376 	struct drm_i915_private *dev_priv = dev->dev_private;
4377 	u32 hw_max, hw_min;
4378 	int ret;
4379 
4380 	if (INTEL_INFO(dev)->gen < 6)
4381 		return -ENODEV;
4382 
4383 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4384 
4385 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
4386 
4387 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4388 	if (ret)
4389 		return ret;
4390 
4391 	/*
4392 	 * Turbo will still be enabled, but won't go below the set value.
4393 	 */
4394 	val = intel_freq_opcode(dev_priv, val);
4395 
4396 	hw_max = dev_priv->rps.max_freq;
4397 	hw_min = dev_priv->rps.min_freq;
4398 
4399 	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
4400 		mutex_unlock(&dev_priv->rps.hw_lock);
4401 		return -EINVAL;
4402 	}
4403 
4404 	dev_priv->rps.min_freq_softlimit = val;
4405 
4406 	intel_set_rps(dev, val);
4407 
4408 	mutex_unlock(&dev_priv->rps.hw_lock);
4409 
4410 	return 0;
4411 }
4412 
4413 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
4414 			i915_min_freq_get, i915_min_freq_set,
4415 			"%llu\n");
4416 
4417 static int
4418 i915_cache_sharing_get(void *data, u64 *val)
4419 {
4420 	struct drm_device *dev = data;
4421 	struct drm_i915_private *dev_priv = dev->dev_private;
4422 	u32 snpcr;
4423 	int ret;
4424 
4425 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
4426 		return -ENODEV;
4427 
4428 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4429 	if (ret)
4430 		return ret;
4431 	intel_runtime_pm_get(dev_priv);
4432 
4433 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4434 
4435 	intel_runtime_pm_put(dev_priv);
4436 	mutex_unlock(&dev_priv->dev->struct_mutex);
4437 
4438 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4439 
4440 	return 0;
4441 }
4442 
4443 static int
4444 i915_cache_sharing_set(void *data, u64 val)
4445 {
4446 	struct drm_device *dev = data;
4447 	struct drm_i915_private *dev_priv = dev->dev_private;
4448 	u32 snpcr;
4449 
4450 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
4451 		return -ENODEV;
4452 
4453 	if (val > 3)
4454 		return -EINVAL;
4455 
4456 	intel_runtime_pm_get(dev_priv);
4457 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4458 
4459 	/* Update the cache sharing policy here as well */
4460 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4461 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
4462 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4463 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4464 
4465 	intel_runtime_pm_put(dev_priv);
4466 	return 0;
4467 }
4468 
4469 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4470 			i915_cache_sharing_get, i915_cache_sharing_set,
4471 			"%llu\n");
4472 
4473 static int i915_sseu_status(struct seq_file *m, void *unused)
4474 {
4475 	struct drm_info_node *node = (struct drm_info_node *) m->private;
4476 	struct drm_device *dev = node->minor->dev;
4477 	struct drm_i915_private *dev_priv = dev->dev_private;
4478 	unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0;
4479 
4480 	if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
4481 		return -ENODEV;
4482 
4483 	seq_puts(m, "SSEU Device Info\n");
4484 	seq_printf(m, "  Available Slice Total: %u\n",
4485 		   INTEL_INFO(dev)->slice_total);
4486 	seq_printf(m, "  Available Subslice Total: %u\n",
4487 		   INTEL_INFO(dev)->subslice_total);
4488 	seq_printf(m, "  Available Subslice Per Slice: %u\n",
4489 		   INTEL_INFO(dev)->subslice_per_slice);
4490 	seq_printf(m, "  Available EU Total: %u\n",
4491 		   INTEL_INFO(dev)->eu_total);
4492 	seq_printf(m, "  Available EU Per Subslice: %u\n",
4493 		   INTEL_INFO(dev)->eu_per_subslice);
4494 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4495 		   yesno(INTEL_INFO(dev)->has_slice_pg));
4496 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4497 		   yesno(INTEL_INFO(dev)->has_subslice_pg));
4498 	seq_printf(m, "  Has EU Power Gating: %s\n",
4499 		   yesno(INTEL_INFO(dev)->has_eu_pg));
4500 
4501 	seq_puts(m, "SSEU Device Status\n");
4502 	if (IS_CHERRYVIEW(dev)) {
4503 		const int ss_max = 2;
4504 		int ss;
4505 		u32 sig1[ss_max], sig2[ss_max];
4506 
4507 		sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4508 		sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4509 		sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4510 		sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4511 
4512 		for (ss = 0; ss < ss_max; ss++) {
4513 			unsigned int eu_cnt;
4514 
4515 			if (sig1[ss] & CHV_SS_PG_ENABLE)
4516 				/* skip disabled subslice */
4517 				continue;
4518 
4519 			s_tot = 1;
4520 			ss_per++;
4521 			eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4522 				 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4523 				 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4524 				 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4525 			eu_tot += eu_cnt;
4526 			eu_per = max(eu_per, eu_cnt);
4527 		}
4528 		ss_tot = ss_per;
4529 	} else if (IS_SKYLAKE(dev)) {
4530 		const int s_max = 3, ss_max = 4;
4531 		int s, ss;
4532 		u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
4533 
4534 		s_reg[0] = I915_READ(GEN9_SLICE0_PGCTL_ACK);
4535 		s_reg[1] = I915_READ(GEN9_SLICE1_PGCTL_ACK);
4536 		s_reg[2] = I915_READ(GEN9_SLICE2_PGCTL_ACK);
4537 		eu_reg[0] = I915_READ(GEN9_SLICE0_SS01_EU_PGCTL_ACK);
4538 		eu_reg[1] = I915_READ(GEN9_SLICE0_SS23_EU_PGCTL_ACK);
4539 		eu_reg[2] = I915_READ(GEN9_SLICE1_SS01_EU_PGCTL_ACK);
4540 		eu_reg[3] = I915_READ(GEN9_SLICE1_SS23_EU_PGCTL_ACK);
4541 		eu_reg[4] = I915_READ(GEN9_SLICE2_SS01_EU_PGCTL_ACK);
4542 		eu_reg[5] = I915_READ(GEN9_SLICE2_SS23_EU_PGCTL_ACK);
4543 		eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4544 			     GEN9_PGCTL_SSA_EU19_ACK |
4545 			     GEN9_PGCTL_SSA_EU210_ACK |
4546 			     GEN9_PGCTL_SSA_EU311_ACK;
4547 		eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4548 			     GEN9_PGCTL_SSB_EU19_ACK |
4549 			     GEN9_PGCTL_SSB_EU210_ACK |
4550 			     GEN9_PGCTL_SSB_EU311_ACK;
4551 
4552 		for (s = 0; s < s_max; s++) {
4553 			if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4554 				/* skip disabled slice */
4555 				continue;
4556 
4557 			s_tot++;
4558 			ss_per = INTEL_INFO(dev)->subslice_per_slice;
4559 			ss_tot += ss_per;
4560 			for (ss = 0; ss < ss_max; ss++) {
4561 				unsigned int eu_cnt;
4562 
4563 				eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4564 						       eu_mask[ss%2]);
4565 				eu_tot += eu_cnt;
4566 				eu_per = max(eu_per, eu_cnt);
4567 			}
4568 		}
4569 	}
4570 	seq_printf(m, "  Enabled Slice Total: %u\n", s_tot);
4571 	seq_printf(m, "  Enabled Subslice Total: %u\n", ss_tot);
4572 	seq_printf(m, "  Enabled Subslice Per Slice: %u\n", ss_per);
4573 	seq_printf(m, "  Enabled EU Total: %u\n", eu_tot);
4574 	seq_printf(m, "  Enabled EU Per Subslice: %u\n", eu_per);
4575 
4576 	return 0;
4577 }
4578 
4579 static int i915_forcewake_open(struct inode *inode, struct file *file)
4580 {
4581 	struct drm_device *dev = inode->i_private;
4582 	struct drm_i915_private *dev_priv = dev->dev_private;
4583 
4584 	if (INTEL_INFO(dev)->gen < 6)
4585 		return 0;
4586 
4587 	intel_runtime_pm_get(dev_priv);
4588 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4589 
4590 	return 0;
4591 }
4592 
4593 static int i915_forcewake_release(struct inode *inode, struct file *file)
4594 {
4595 	struct drm_device *dev = inode->i_private;
4596 	struct drm_i915_private *dev_priv = dev->dev_private;
4597 
4598 	if (INTEL_INFO(dev)->gen < 6)
4599 		return 0;
4600 
4601 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4602 	intel_runtime_pm_put(dev_priv);
4603 
4604 	return 0;
4605 }
4606 
4607 static const struct file_operations i915_forcewake_fops = {
4608 	.owner = THIS_MODULE,
4609 	.open = i915_forcewake_open,
4610 	.release = i915_forcewake_release,
4611 };
4612 
4613 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
4614 {
4615 	struct drm_device *dev = minor->dev;
4616 	struct dentry *ent;
4617 
4618 	ent = debugfs_create_file("i915_forcewake_user",
4619 				  S_IRUSR,
4620 				  root, dev,
4621 				  &i915_forcewake_fops);
4622 	if (!ent)
4623 		return -ENOMEM;
4624 
4625 	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
4626 }
4627 
4628 static int i915_debugfs_create(struct dentry *root,
4629 			       struct drm_minor *minor,
4630 			       const char *name,
4631 			       const struct file_operations *fops)
4632 {
4633 	struct drm_device *dev = minor->dev;
4634 	struct dentry *ent;
4635 
4636 	ent = debugfs_create_file(name,
4637 				  S_IRUGO | S_IWUSR,
4638 				  root, dev,
4639 				  fops);
4640 	if (!ent)
4641 		return -ENOMEM;
4642 
4643 	return drm_add_fake_info_node(minor, ent, fops);
4644 }
4645 
4646 static const struct drm_info_list i915_debugfs_list[] = {
4647 	{"i915_capabilities", i915_capabilities, 0},
4648 	{"i915_gem_objects", i915_gem_object_info, 0},
4649 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
4650 	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
4651 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
4652 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
4653 	{"i915_gem_stolen", i915_gem_stolen_list_info },
4654 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
4655 	{"i915_gem_request", i915_gem_request_info, 0},
4656 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
4657 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4658 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4659 	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
4660 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
4661 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
4662 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
4663 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4664 	{"i915_frequency_info", i915_frequency_info, 0},
4665 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4666 	{"i915_drpc_info", i915_drpc_info, 0},
4667 	{"i915_emon_status", i915_emon_status, 0},
4668 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4669 	{"i915_fbc_status", i915_fbc_status, 0},
4670 	{"i915_ips_status", i915_ips_status, 0},
4671 	{"i915_sr_status", i915_sr_status, 0},
4672 	{"i915_opregion", i915_opregion, 0},
4673 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4674 	{"i915_context_status", i915_context_status, 0},
4675 	{"i915_dump_lrc", i915_dump_lrc, 0},
4676 	{"i915_execlists", i915_execlists, 0},
4677 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4678 	{"i915_swizzle_info", i915_swizzle_info, 0},
4679 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
4680 	{"i915_llc", i915_llc, 0},
4681 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4682 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
4683 	{"i915_energy_uJ", i915_energy_uJ, 0},
4684 	{"i915_pc8_status", i915_pc8_status, 0},
4685 	{"i915_power_domain_info", i915_power_domain_info, 0},
4686 	{"i915_display_info", i915_display_info, 0},
4687 	{"i915_semaphore_status", i915_semaphore_status, 0},
4688 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4689 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4690 	{"i915_wa_registers", i915_wa_registers, 0},
4691 	{"i915_ddb_info", i915_ddb_info, 0},
4692 	{"i915_sseu_status", i915_sseu_status, 0},
4693 	{"i915_drrs_status", i915_drrs_status, 0},
4694 };
4695 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4696 
4697 static const struct i915_debugfs_files {
4698 	const char *name;
4699 	const struct file_operations *fops;
4700 } i915_debugfs_files[] = {
4701 	{"i915_wedged", &i915_wedged_fops},
4702 	{"i915_max_freq", &i915_max_freq_fops},
4703 	{"i915_min_freq", &i915_min_freq_fops},
4704 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4705 	{"i915_ring_stop", &i915_ring_stop_fops},
4706 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4707 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
4708 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4709 	{"i915_error_state", &i915_error_state_fops},
4710 	{"i915_next_seqno", &i915_next_seqno_fops},
4711 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
4712 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4713 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4714 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4715 	{"i915_fbc_false_color", &i915_fbc_fc_fops},
4716 };
4717 
4718 void intel_display_crc_init(struct drm_device *dev)
4719 {
4720 	struct drm_i915_private *dev_priv = dev->dev_private;
4721 	enum pipe pipe;
4722 
4723 	for_each_pipe(dev_priv, pipe) {
4724 		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
4725 
4726 		pipe_crc->opened = false;
4727 		spin_lock_init(&pipe_crc->lock);
4728 		init_waitqueue_head(&pipe_crc->wq);
4729 	}
4730 }
4731 
4732 int i915_debugfs_init(struct drm_minor *minor)
4733 {
4734 	int ret, i;
4735 
4736 	ret = i915_forcewake_create(minor->debugfs_root, minor);
4737 	if (ret)
4738 		return ret;
4739 
4740 	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
4741 		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
4742 		if (ret)
4743 			return ret;
4744 	}
4745 
4746 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4747 		ret = i915_debugfs_create(minor->debugfs_root, minor,
4748 					  i915_debugfs_files[i].name,
4749 					  i915_debugfs_files[i].fops);
4750 		if (ret)
4751 			return ret;
4752 	}
4753 
4754 	return drm_debugfs_create_files(i915_debugfs_list,
4755 					I915_DEBUGFS_ENTRIES,
4756 					minor->debugfs_root, minor);
4757 }
4758 
4759 void i915_debugfs_cleanup(struct drm_minor *minor)
4760 {
4761 	int i;
4762 
4763 	drm_debugfs_remove_files(i915_debugfs_list,
4764 				 I915_DEBUGFS_ENTRIES, minor);
4765 
4766 	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
4767 				 1, minor);
4768 
4769 	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
4770 		struct drm_info_list *info_list =
4771 			(struct drm_info_list *)&i915_pipe_crc_data[i];
4772 
4773 		drm_debugfs_remove_files(info_list, 1, minor);
4774 	}
4775 
4776 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4777 		struct drm_info_list *info_list =
4778 			(struct drm_info_list *) i915_debugfs_files[i].fops;
4779 
4780 		drm_debugfs_remove_files(info_list, 1, minor);
4781 	}
4782 }
4783