1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28 
29 #include <linux/seq_file.h>
30 #include <linux/circ_buf.h>
31 #include <linux/ctype.h>
32 #include <linux/debugfs.h>
33 #include <linux/slab.h>
34 #include <linux/export.h>
35 #include <linux/list_sort.h>
36 #include <asm/msr-index.h>
37 #include <drm/drmP.h>
38 #include "intel_drv.h"
39 #include "intel_ringbuffer.h"
40 #include <drm/i915_drm.h>
41 #include "i915_drv.h"
42 
43 enum {
44 	ACTIVE_LIST,
45 	INACTIVE_LIST,
46 	PINNED_LIST,
47 };
48 
49 static const char *yesno(int v)
50 {
51 	return v ? "yes" : "no";
52 }
53 
54 /* As the drm_debugfs_init() routines are called before dev->dev_private is
55  * allocated we need to hook into the minor for release. */
56 static int
57 drm_add_fake_info_node(struct drm_minor *minor,
58 		       struct dentry *ent,
59 		       const void *key)
60 {
61 	struct drm_info_node *node;
62 
63 	node = kmalloc(sizeof(*node), GFP_KERNEL);
64 	if (node == NULL) {
65 		debugfs_remove(ent);
66 		return -ENOMEM;
67 	}
68 
69 	node->minor = minor;
70 	node->dent = ent;
71 	node->info_ent = (void *) key;
72 
73 	mutex_lock(&minor->debugfs_lock);
74 	list_add(&node->list, &minor->debugfs_list);
75 	mutex_unlock(&minor->debugfs_lock);
76 
77 	return 0;
78 }
79 
80 static int i915_capabilities(struct seq_file *m, void *data)
81 {
82 	struct drm_info_node *node = m->private;
83 	struct drm_device *dev = node->minor->dev;
84 	const struct intel_device_info *info = INTEL_INFO(dev);
85 
86 	seq_printf(m, "gen: %d\n", info->gen);
87 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
88 #define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
89 #define SEP_SEMICOLON ;
90 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
91 #undef PRINT_FLAG
92 #undef SEP_SEMICOLON
93 
94 	return 0;
95 }
96 
97 static const char *get_pin_flag(struct drm_i915_gem_object *obj)
98 {
99 	if (obj->user_pin_count > 0)
100 		return "P";
101 	else if (i915_gem_obj_is_pinned(obj))
102 		return "p";
103 	else
104 		return " ";
105 }
106 
107 static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
108 {
109 	switch (obj->tiling_mode) {
110 	default:
111 	case I915_TILING_NONE: return " ";
112 	case I915_TILING_X: return "X";
113 	case I915_TILING_Y: return "Y";
114 	}
115 }
116 
117 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
118 {
119 	return obj->has_global_gtt_mapping ? "g" : " ";
120 }
121 
122 static void
123 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
124 {
125 	struct i915_vma *vma;
126 	int pin_count = 0;
127 
128 	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
129 		   &obj->base,
130 		   get_pin_flag(obj),
131 		   get_tiling_flag(obj),
132 		   get_global_flag(obj),
133 		   obj->base.size / 1024,
134 		   obj->base.read_domains,
135 		   obj->base.write_domain,
136 		   obj->last_read_seqno,
137 		   obj->last_write_seqno,
138 		   obj->last_fenced_seqno,
139 		   i915_cache_level_str(obj->cache_level),
140 		   obj->dirty ? " dirty" : "",
141 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
142 	if (obj->base.name)
143 		seq_printf(m, " (name: %d)", obj->base.name);
144 	list_for_each_entry(vma, &obj->vma_list, vma_link)
145 		if (vma->pin_count > 0)
146 			pin_count++;
147 		seq_printf(m, " (pinned x %d)", pin_count);
148 	if (obj->pin_display)
149 		seq_printf(m, " (display)");
150 	if (obj->fence_reg != I915_FENCE_REG_NONE)
151 		seq_printf(m, " (fence: %d)", obj->fence_reg);
152 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
153 		if (!i915_is_ggtt(vma->vm))
154 			seq_puts(m, " (pp");
155 		else
156 			seq_puts(m, " (g");
157 		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
158 			   vma->node.start, vma->node.size);
159 	}
160 	if (obj->stolen)
161 		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
162 	if (obj->pin_mappable || obj->fault_mappable) {
163 		char s[3], *t = s;
164 		if (obj->pin_mappable)
165 			*t++ = 'p';
166 		if (obj->fault_mappable)
167 			*t++ = 'f';
168 		*t = '\0';
169 		seq_printf(m, " (%s mappable)", s);
170 	}
171 	if (obj->ring != NULL)
172 		seq_printf(m, " (%s)", obj->ring->name);
173 }
174 
175 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
176 {
177 	seq_putc(m, ctx->is_initialized ? 'I' : 'i');
178 	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
179 	seq_putc(m, ' ');
180 }
181 
182 static int i915_gem_object_list_info(struct seq_file *m, void *data)
183 {
184 	struct drm_info_node *node = m->private;
185 	uintptr_t list = (uintptr_t) node->info_ent->data;
186 	struct list_head *head;
187 	struct drm_device *dev = node->minor->dev;
188 	struct drm_i915_private *dev_priv = dev->dev_private;
189 	struct i915_address_space *vm = &dev_priv->gtt.base;
190 	struct i915_vma *vma;
191 	size_t total_obj_size, total_gtt_size;
192 	int count, ret;
193 
194 	ret = mutex_lock_interruptible(&dev->struct_mutex);
195 	if (ret)
196 		return ret;
197 
198 	/* FIXME: the user of this interface might want more than just GGTT */
199 	switch (list) {
200 	case ACTIVE_LIST:
201 		seq_puts(m, "Active:\n");
202 		head = &vm->active_list;
203 		break;
204 	case INACTIVE_LIST:
205 		seq_puts(m, "Inactive:\n");
206 		head = &vm->inactive_list;
207 		break;
208 	default:
209 		mutex_unlock(&dev->struct_mutex);
210 		return -EINVAL;
211 	}
212 
213 	total_obj_size = total_gtt_size = count = 0;
214 	list_for_each_entry(vma, head, mm_list) {
215 		seq_printf(m, "   ");
216 		describe_obj(m, vma->obj);
217 		seq_printf(m, "\n");
218 		total_obj_size += vma->obj->base.size;
219 		total_gtt_size += vma->node.size;
220 		count++;
221 	}
222 	mutex_unlock(&dev->struct_mutex);
223 
224 	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
225 		   count, total_obj_size, total_gtt_size);
226 	return 0;
227 }
228 
229 static int obj_rank_by_stolen(void *priv,
230 			      struct list_head *A, struct list_head *B)
231 {
232 	struct drm_i915_gem_object *a =
233 		container_of(A, struct drm_i915_gem_object, obj_exec_link);
234 	struct drm_i915_gem_object *b =
235 		container_of(B, struct drm_i915_gem_object, obj_exec_link);
236 
237 	return a->stolen->start - b->stolen->start;
238 }
239 
240 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
241 {
242 	struct drm_info_node *node = m->private;
243 	struct drm_device *dev = node->minor->dev;
244 	struct drm_i915_private *dev_priv = dev->dev_private;
245 	struct drm_i915_gem_object *obj;
246 	size_t total_obj_size, total_gtt_size;
247 	LIST_HEAD(stolen);
248 	int count, ret;
249 
250 	ret = mutex_lock_interruptible(&dev->struct_mutex);
251 	if (ret)
252 		return ret;
253 
254 	total_obj_size = total_gtt_size = count = 0;
255 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
256 		if (obj->stolen == NULL)
257 			continue;
258 
259 		list_add(&obj->obj_exec_link, &stolen);
260 
261 		total_obj_size += obj->base.size;
262 		total_gtt_size += i915_gem_obj_ggtt_size(obj);
263 		count++;
264 	}
265 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
266 		if (obj->stolen == NULL)
267 			continue;
268 
269 		list_add(&obj->obj_exec_link, &stolen);
270 
271 		total_obj_size += obj->base.size;
272 		count++;
273 	}
274 	list_sort(NULL, &stolen, obj_rank_by_stolen);
275 	seq_puts(m, "Stolen:\n");
276 	while (!list_empty(&stolen)) {
277 		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
278 		seq_puts(m, "   ");
279 		describe_obj(m, obj);
280 		seq_putc(m, '\n');
281 		list_del_init(&obj->obj_exec_link);
282 	}
283 	mutex_unlock(&dev->struct_mutex);
284 
285 	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
286 		   count, total_obj_size, total_gtt_size);
287 	return 0;
288 }
289 
290 #define count_objects(list, member) do { \
291 	list_for_each_entry(obj, list, member) { \
292 		size += i915_gem_obj_ggtt_size(obj); \
293 		++count; \
294 		if (obj->map_and_fenceable) { \
295 			mappable_size += i915_gem_obj_ggtt_size(obj); \
296 			++mappable_count; \
297 		} \
298 	} \
299 } while (0)
300 
301 struct file_stats {
302 	struct drm_i915_file_private *file_priv;
303 	int count;
304 	size_t total, unbound;
305 	size_t global, shared;
306 	size_t active, inactive;
307 };
308 
309 static int per_file_stats(int id, void *ptr, void *data)
310 {
311 	struct drm_i915_gem_object *obj = ptr;
312 	struct file_stats *stats = data;
313 	struct i915_vma *vma;
314 
315 	stats->count++;
316 	stats->total += obj->base.size;
317 
318 	if (obj->base.name || obj->base.dma_buf)
319 		stats->shared += obj->base.size;
320 
321 	if (USES_FULL_PPGTT(obj->base.dev)) {
322 		list_for_each_entry(vma, &obj->vma_list, vma_link) {
323 			struct i915_hw_ppgtt *ppgtt;
324 
325 			if (!drm_mm_node_allocated(&vma->node))
326 				continue;
327 
328 			if (i915_is_ggtt(vma->vm)) {
329 				stats->global += obj->base.size;
330 				continue;
331 			}
332 
333 			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
334 			if (ppgtt->ctx && ppgtt->ctx->file_priv != stats->file_priv)
335 				continue;
336 
337 			if (obj->ring) /* XXX per-vma statistic */
338 				stats->active += obj->base.size;
339 			else
340 				stats->inactive += obj->base.size;
341 
342 			return 0;
343 		}
344 	} else {
345 		if (i915_gem_obj_ggtt_bound(obj)) {
346 			stats->global += obj->base.size;
347 			if (obj->ring)
348 				stats->active += obj->base.size;
349 			else
350 				stats->inactive += obj->base.size;
351 			return 0;
352 		}
353 	}
354 
355 	if (!list_empty(&obj->global_list))
356 		stats->unbound += obj->base.size;
357 
358 	return 0;
359 }
360 
361 #define count_vmas(list, member) do { \
362 	list_for_each_entry(vma, list, member) { \
363 		size += i915_gem_obj_ggtt_size(vma->obj); \
364 		++count; \
365 		if (vma->obj->map_and_fenceable) { \
366 			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
367 			++mappable_count; \
368 		} \
369 	} \
370 } while (0)
371 
372 static int i915_gem_object_info(struct seq_file *m, void* data)
373 {
374 	struct drm_info_node *node = m->private;
375 	struct drm_device *dev = node->minor->dev;
376 	struct drm_i915_private *dev_priv = dev->dev_private;
377 	u32 count, mappable_count, purgeable_count;
378 	size_t size, mappable_size, purgeable_size;
379 	struct drm_i915_gem_object *obj;
380 	struct i915_address_space *vm = &dev_priv->gtt.base;
381 	struct drm_file *file;
382 	struct i915_vma *vma;
383 	int ret;
384 
385 	ret = mutex_lock_interruptible(&dev->struct_mutex);
386 	if (ret)
387 		return ret;
388 
389 	seq_printf(m, "%u objects, %zu bytes\n",
390 		   dev_priv->mm.object_count,
391 		   dev_priv->mm.object_memory);
392 
393 	size = count = mappable_size = mappable_count = 0;
394 	count_objects(&dev_priv->mm.bound_list, global_list);
395 	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
396 		   count, mappable_count, size, mappable_size);
397 
398 	size = count = mappable_size = mappable_count = 0;
399 	count_vmas(&vm->active_list, mm_list);
400 	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
401 		   count, mappable_count, size, mappable_size);
402 
403 	size = count = mappable_size = mappable_count = 0;
404 	count_vmas(&vm->inactive_list, mm_list);
405 	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
406 		   count, mappable_count, size, mappable_size);
407 
408 	size = count = purgeable_size = purgeable_count = 0;
409 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
410 		size += obj->base.size, ++count;
411 		if (obj->madv == I915_MADV_DONTNEED)
412 			purgeable_size += obj->base.size, ++purgeable_count;
413 	}
414 	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
415 
416 	size = count = mappable_size = mappable_count = 0;
417 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
418 		if (obj->fault_mappable) {
419 			size += i915_gem_obj_ggtt_size(obj);
420 			++count;
421 		}
422 		if (obj->pin_mappable) {
423 			mappable_size += i915_gem_obj_ggtt_size(obj);
424 			++mappable_count;
425 		}
426 		if (obj->madv == I915_MADV_DONTNEED) {
427 			purgeable_size += obj->base.size;
428 			++purgeable_count;
429 		}
430 	}
431 	seq_printf(m, "%u purgeable objects, %zu bytes\n",
432 		   purgeable_count, purgeable_size);
433 	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
434 		   mappable_count, mappable_size);
435 	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
436 		   count, size);
437 
438 	seq_printf(m, "%zu [%lu] gtt total\n",
439 		   dev_priv->gtt.base.total,
440 		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
441 
442 	seq_putc(m, '\n');
443 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
444 		struct file_stats stats;
445 		struct task_struct *task;
446 
447 		memset(&stats, 0, sizeof(stats));
448 		stats.file_priv = file->driver_priv;
449 		spin_lock(&file->table_lock);
450 		idr_for_each(&file->object_idr, per_file_stats, &stats);
451 		spin_unlock(&file->table_lock);
452 		/*
453 		 * Although we have a valid reference on file->pid, that does
454 		 * not guarantee that the task_struct who called get_pid() is
455 		 * still alive (e.g. get_pid(current) => fork() => exit()).
456 		 * Therefore, we need to protect this ->comm access using RCU.
457 		 */
458 		rcu_read_lock();
459 		task = pid_task(file->pid, PIDTYPE_PID);
460 		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
461 			   task ? task->comm : "<unknown>",
462 			   stats.count,
463 			   stats.total,
464 			   stats.active,
465 			   stats.inactive,
466 			   stats.global,
467 			   stats.shared,
468 			   stats.unbound);
469 		rcu_read_unlock();
470 	}
471 
472 	mutex_unlock(&dev->struct_mutex);
473 
474 	return 0;
475 }
476 
477 static int i915_gem_gtt_info(struct seq_file *m, void *data)
478 {
479 	struct drm_info_node *node = m->private;
480 	struct drm_device *dev = node->minor->dev;
481 	uintptr_t list = (uintptr_t) node->info_ent->data;
482 	struct drm_i915_private *dev_priv = dev->dev_private;
483 	struct drm_i915_gem_object *obj;
484 	size_t total_obj_size, total_gtt_size;
485 	int count, ret;
486 
487 	ret = mutex_lock_interruptible(&dev->struct_mutex);
488 	if (ret)
489 		return ret;
490 
491 	total_obj_size = total_gtt_size = count = 0;
492 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
493 		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
494 			continue;
495 
496 		seq_puts(m, "   ");
497 		describe_obj(m, obj);
498 		seq_putc(m, '\n');
499 		total_obj_size += obj->base.size;
500 		total_gtt_size += i915_gem_obj_ggtt_size(obj);
501 		count++;
502 	}
503 
504 	mutex_unlock(&dev->struct_mutex);
505 
506 	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
507 		   count, total_obj_size, total_gtt_size);
508 
509 	return 0;
510 }
511 
512 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
513 {
514 	struct drm_info_node *node = m->private;
515 	struct drm_device *dev = node->minor->dev;
516 	unsigned long flags;
517 	struct intel_crtc *crtc;
518 
519 	for_each_intel_crtc(dev, crtc) {
520 		const char pipe = pipe_name(crtc->pipe);
521 		const char plane = plane_name(crtc->plane);
522 		struct intel_unpin_work *work;
523 
524 		spin_lock_irqsave(&dev->event_lock, flags);
525 		work = crtc->unpin_work;
526 		if (work == NULL) {
527 			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
528 				   pipe, plane);
529 		} else {
530 			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
531 				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
532 					   pipe, plane);
533 			} else {
534 				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
535 					   pipe, plane);
536 			}
537 			if (work->enable_stall_check)
538 				seq_puts(m, "Stall check enabled, ");
539 			else
540 				seq_puts(m, "Stall check waiting for page flip ioctl, ");
541 			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
542 
543 			if (work->old_fb_obj) {
544 				struct drm_i915_gem_object *obj = work->old_fb_obj;
545 				if (obj)
546 					seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
547 						   i915_gem_obj_ggtt_offset(obj));
548 			}
549 			if (work->pending_flip_obj) {
550 				struct drm_i915_gem_object *obj = work->pending_flip_obj;
551 				if (obj)
552 					seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
553 						   i915_gem_obj_ggtt_offset(obj));
554 			}
555 		}
556 		spin_unlock_irqrestore(&dev->event_lock, flags);
557 	}
558 
559 	return 0;
560 }
561 
562 static int i915_gem_request_info(struct seq_file *m, void *data)
563 {
564 	struct drm_info_node *node = m->private;
565 	struct drm_device *dev = node->minor->dev;
566 	struct drm_i915_private *dev_priv = dev->dev_private;
567 	struct intel_engine_cs *ring;
568 	struct drm_i915_gem_request *gem_request;
569 	int ret, count, i;
570 
571 	ret = mutex_lock_interruptible(&dev->struct_mutex);
572 	if (ret)
573 		return ret;
574 
575 	count = 0;
576 	for_each_ring(ring, dev_priv, i) {
577 		if (list_empty(&ring->request_list))
578 			continue;
579 
580 		seq_printf(m, "%s requests:\n", ring->name);
581 		list_for_each_entry(gem_request,
582 				    &ring->request_list,
583 				    list) {
584 			seq_printf(m, "    %d @ %d\n",
585 				   gem_request->seqno,
586 				   (int) (jiffies - gem_request->emitted_jiffies));
587 		}
588 		count++;
589 	}
590 	mutex_unlock(&dev->struct_mutex);
591 
592 	if (count == 0)
593 		seq_puts(m, "No requests\n");
594 
595 	return 0;
596 }
597 
598 static void i915_ring_seqno_info(struct seq_file *m,
599 				 struct intel_engine_cs *ring)
600 {
601 	if (ring->get_seqno) {
602 		seq_printf(m, "Current sequence (%s): %u\n",
603 			   ring->name, ring->get_seqno(ring, false));
604 	}
605 }
606 
607 static int i915_gem_seqno_info(struct seq_file *m, void *data)
608 {
609 	struct drm_info_node *node = m->private;
610 	struct drm_device *dev = node->minor->dev;
611 	struct drm_i915_private *dev_priv = dev->dev_private;
612 	struct intel_engine_cs *ring;
613 	int ret, i;
614 
615 	ret = mutex_lock_interruptible(&dev->struct_mutex);
616 	if (ret)
617 		return ret;
618 	intel_runtime_pm_get(dev_priv);
619 
620 	for_each_ring(ring, dev_priv, i)
621 		i915_ring_seqno_info(m, ring);
622 
623 	intel_runtime_pm_put(dev_priv);
624 	mutex_unlock(&dev->struct_mutex);
625 
626 	return 0;
627 }
628 
629 
630 static int i915_interrupt_info(struct seq_file *m, void *data)
631 {
632 	struct drm_info_node *node = m->private;
633 	struct drm_device *dev = node->minor->dev;
634 	struct drm_i915_private *dev_priv = dev->dev_private;
635 	struct intel_engine_cs *ring;
636 	int ret, i, pipe;
637 
638 	ret = mutex_lock_interruptible(&dev->struct_mutex);
639 	if (ret)
640 		return ret;
641 	intel_runtime_pm_get(dev_priv);
642 
643 	if (IS_CHERRYVIEW(dev)) {
644 		int i;
645 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
646 			   I915_READ(GEN8_MASTER_IRQ));
647 
648 		seq_printf(m, "Display IER:\t%08x\n",
649 			   I915_READ(VLV_IER));
650 		seq_printf(m, "Display IIR:\t%08x\n",
651 			   I915_READ(VLV_IIR));
652 		seq_printf(m, "Display IIR_RW:\t%08x\n",
653 			   I915_READ(VLV_IIR_RW));
654 		seq_printf(m, "Display IMR:\t%08x\n",
655 			   I915_READ(VLV_IMR));
656 		for_each_pipe(pipe)
657 			seq_printf(m, "Pipe %c stat:\t%08x\n",
658 				   pipe_name(pipe),
659 				   I915_READ(PIPESTAT(pipe)));
660 
661 		seq_printf(m, "Port hotplug:\t%08x\n",
662 			   I915_READ(PORT_HOTPLUG_EN));
663 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
664 			   I915_READ(VLV_DPFLIPSTAT));
665 		seq_printf(m, "DPINVGTT:\t%08x\n",
666 			   I915_READ(DPINVGTT));
667 
668 		for (i = 0; i < 4; i++) {
669 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
670 				   i, I915_READ(GEN8_GT_IMR(i)));
671 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
672 				   i, I915_READ(GEN8_GT_IIR(i)));
673 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
674 				   i, I915_READ(GEN8_GT_IER(i)));
675 		}
676 
677 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
678 			   I915_READ(GEN8_PCU_IMR));
679 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
680 			   I915_READ(GEN8_PCU_IIR));
681 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
682 			   I915_READ(GEN8_PCU_IER));
683 	} else if (INTEL_INFO(dev)->gen >= 8) {
684 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
685 			   I915_READ(GEN8_MASTER_IRQ));
686 
687 		for (i = 0; i < 4; i++) {
688 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
689 				   i, I915_READ(GEN8_GT_IMR(i)));
690 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
691 				   i, I915_READ(GEN8_GT_IIR(i)));
692 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
693 				   i, I915_READ(GEN8_GT_IER(i)));
694 		}
695 
696 		for_each_pipe(pipe) {
697 			seq_printf(m, "Pipe %c IMR:\t%08x\n",
698 				   pipe_name(pipe),
699 				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
700 			seq_printf(m, "Pipe %c IIR:\t%08x\n",
701 				   pipe_name(pipe),
702 				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
703 			seq_printf(m, "Pipe %c IER:\t%08x\n",
704 				   pipe_name(pipe),
705 				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
706 		}
707 
708 		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
709 			   I915_READ(GEN8_DE_PORT_IMR));
710 		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
711 			   I915_READ(GEN8_DE_PORT_IIR));
712 		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
713 			   I915_READ(GEN8_DE_PORT_IER));
714 
715 		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
716 			   I915_READ(GEN8_DE_MISC_IMR));
717 		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
718 			   I915_READ(GEN8_DE_MISC_IIR));
719 		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
720 			   I915_READ(GEN8_DE_MISC_IER));
721 
722 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
723 			   I915_READ(GEN8_PCU_IMR));
724 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
725 			   I915_READ(GEN8_PCU_IIR));
726 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
727 			   I915_READ(GEN8_PCU_IER));
728 	} else if (IS_VALLEYVIEW(dev)) {
729 		seq_printf(m, "Display IER:\t%08x\n",
730 			   I915_READ(VLV_IER));
731 		seq_printf(m, "Display IIR:\t%08x\n",
732 			   I915_READ(VLV_IIR));
733 		seq_printf(m, "Display IIR_RW:\t%08x\n",
734 			   I915_READ(VLV_IIR_RW));
735 		seq_printf(m, "Display IMR:\t%08x\n",
736 			   I915_READ(VLV_IMR));
737 		for_each_pipe(pipe)
738 			seq_printf(m, "Pipe %c stat:\t%08x\n",
739 				   pipe_name(pipe),
740 				   I915_READ(PIPESTAT(pipe)));
741 
742 		seq_printf(m, "Master IER:\t%08x\n",
743 			   I915_READ(VLV_MASTER_IER));
744 
745 		seq_printf(m, "Render IER:\t%08x\n",
746 			   I915_READ(GTIER));
747 		seq_printf(m, "Render IIR:\t%08x\n",
748 			   I915_READ(GTIIR));
749 		seq_printf(m, "Render IMR:\t%08x\n",
750 			   I915_READ(GTIMR));
751 
752 		seq_printf(m, "PM IER:\t\t%08x\n",
753 			   I915_READ(GEN6_PMIER));
754 		seq_printf(m, "PM IIR:\t\t%08x\n",
755 			   I915_READ(GEN6_PMIIR));
756 		seq_printf(m, "PM IMR:\t\t%08x\n",
757 			   I915_READ(GEN6_PMIMR));
758 
759 		seq_printf(m, "Port hotplug:\t%08x\n",
760 			   I915_READ(PORT_HOTPLUG_EN));
761 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
762 			   I915_READ(VLV_DPFLIPSTAT));
763 		seq_printf(m, "DPINVGTT:\t%08x\n",
764 			   I915_READ(DPINVGTT));
765 
766 	} else if (!HAS_PCH_SPLIT(dev)) {
767 		seq_printf(m, "Interrupt enable:    %08x\n",
768 			   I915_READ(IER));
769 		seq_printf(m, "Interrupt identity:  %08x\n",
770 			   I915_READ(IIR));
771 		seq_printf(m, "Interrupt mask:      %08x\n",
772 			   I915_READ(IMR));
773 		for_each_pipe(pipe)
774 			seq_printf(m, "Pipe %c stat:         %08x\n",
775 				   pipe_name(pipe),
776 				   I915_READ(PIPESTAT(pipe)));
777 	} else {
778 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
779 			   I915_READ(DEIER));
780 		seq_printf(m, "North Display Interrupt identity:	%08x\n",
781 			   I915_READ(DEIIR));
782 		seq_printf(m, "North Display Interrupt mask:		%08x\n",
783 			   I915_READ(DEIMR));
784 		seq_printf(m, "South Display Interrupt enable:		%08x\n",
785 			   I915_READ(SDEIER));
786 		seq_printf(m, "South Display Interrupt identity:	%08x\n",
787 			   I915_READ(SDEIIR));
788 		seq_printf(m, "South Display Interrupt mask:		%08x\n",
789 			   I915_READ(SDEIMR));
790 		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
791 			   I915_READ(GTIER));
792 		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
793 			   I915_READ(GTIIR));
794 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
795 			   I915_READ(GTIMR));
796 	}
797 	for_each_ring(ring, dev_priv, i) {
798 		if (INTEL_INFO(dev)->gen >= 6) {
799 			seq_printf(m,
800 				   "Graphics Interrupt mask (%s):	%08x\n",
801 				   ring->name, I915_READ_IMR(ring));
802 		}
803 		i915_ring_seqno_info(m, ring);
804 	}
805 	intel_runtime_pm_put(dev_priv);
806 	mutex_unlock(&dev->struct_mutex);
807 
808 	return 0;
809 }
810 
811 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
812 {
813 	struct drm_info_node *node = m->private;
814 	struct drm_device *dev = node->minor->dev;
815 	struct drm_i915_private *dev_priv = dev->dev_private;
816 	int i, ret;
817 
818 	ret = mutex_lock_interruptible(&dev->struct_mutex);
819 	if (ret)
820 		return ret;
821 
822 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
823 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
824 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
825 		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
826 
827 		seq_printf(m, "Fence %d, pin count = %d, object = ",
828 			   i, dev_priv->fence_regs[i].pin_count);
829 		if (obj == NULL)
830 			seq_puts(m, "unused");
831 		else
832 			describe_obj(m, obj);
833 		seq_putc(m, '\n');
834 	}
835 
836 	mutex_unlock(&dev->struct_mutex);
837 	return 0;
838 }
839 
840 static int i915_hws_info(struct seq_file *m, void *data)
841 {
842 	struct drm_info_node *node = m->private;
843 	struct drm_device *dev = node->minor->dev;
844 	struct drm_i915_private *dev_priv = dev->dev_private;
845 	struct intel_engine_cs *ring;
846 	const u32 *hws;
847 	int i;
848 
849 	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
850 	hws = ring->status_page.page_addr;
851 	if (hws == NULL)
852 		return 0;
853 
854 	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
855 		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
856 			   i * 4,
857 			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
858 	}
859 	return 0;
860 }
861 
862 static ssize_t
863 i915_error_state_write(struct file *filp,
864 		       const char __user *ubuf,
865 		       size_t cnt,
866 		       loff_t *ppos)
867 {
868 	struct i915_error_state_file_priv *error_priv = filp->private_data;
869 	struct drm_device *dev = error_priv->dev;
870 	int ret;
871 
872 	DRM_DEBUG_DRIVER("Resetting error state\n");
873 
874 	ret = mutex_lock_interruptible(&dev->struct_mutex);
875 	if (ret)
876 		return ret;
877 
878 	i915_destroy_error_state(dev);
879 	mutex_unlock(&dev->struct_mutex);
880 
881 	return cnt;
882 }
883 
884 static int i915_error_state_open(struct inode *inode, struct file *file)
885 {
886 	struct drm_device *dev = inode->i_private;
887 	struct i915_error_state_file_priv *error_priv;
888 
889 	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
890 	if (!error_priv)
891 		return -ENOMEM;
892 
893 	error_priv->dev = dev;
894 
895 	i915_error_state_get(dev, error_priv);
896 
897 	file->private_data = error_priv;
898 
899 	return 0;
900 }
901 
902 static int i915_error_state_release(struct inode *inode, struct file *file)
903 {
904 	struct i915_error_state_file_priv *error_priv = file->private_data;
905 
906 	i915_error_state_put(error_priv);
907 	kfree(error_priv);
908 
909 	return 0;
910 }
911 
912 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
913 				     size_t count, loff_t *pos)
914 {
915 	struct i915_error_state_file_priv *error_priv = file->private_data;
916 	struct drm_i915_error_state_buf error_str;
917 	loff_t tmp_pos = 0;
918 	ssize_t ret_count = 0;
919 	int ret;
920 
921 	ret = i915_error_state_buf_init(&error_str, count, *pos);
922 	if (ret)
923 		return ret;
924 
925 	ret = i915_error_state_to_str(&error_str, error_priv);
926 	if (ret)
927 		goto out;
928 
929 	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
930 					    error_str.buf,
931 					    error_str.bytes);
932 
933 	if (ret_count < 0)
934 		ret = ret_count;
935 	else
936 		*pos = error_str.start + ret_count;
937 out:
938 	i915_error_state_buf_release(&error_str);
939 	return ret ?: ret_count;
940 }
941 
942 static const struct file_operations i915_error_state_fops = {
943 	.owner = THIS_MODULE,
944 	.open = i915_error_state_open,
945 	.read = i915_error_state_read,
946 	.write = i915_error_state_write,
947 	.llseek = default_llseek,
948 	.release = i915_error_state_release,
949 };
950 
951 static int
952 i915_next_seqno_get(void *data, u64 *val)
953 {
954 	struct drm_device *dev = data;
955 	struct drm_i915_private *dev_priv = dev->dev_private;
956 	int ret;
957 
958 	ret = mutex_lock_interruptible(&dev->struct_mutex);
959 	if (ret)
960 		return ret;
961 
962 	*val = dev_priv->next_seqno;
963 	mutex_unlock(&dev->struct_mutex);
964 
965 	return 0;
966 }
967 
968 static int
969 i915_next_seqno_set(void *data, u64 val)
970 {
971 	struct drm_device *dev = data;
972 	int ret;
973 
974 	ret = mutex_lock_interruptible(&dev->struct_mutex);
975 	if (ret)
976 		return ret;
977 
978 	ret = i915_gem_set_seqno(dev, val);
979 	mutex_unlock(&dev->struct_mutex);
980 
981 	return ret;
982 }
983 
984 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
985 			i915_next_seqno_get, i915_next_seqno_set,
986 			"0x%llx\n");
987 
988 static int i915_rstdby_delays(struct seq_file *m, void *unused)
989 {
990 	struct drm_info_node *node = m->private;
991 	struct drm_device *dev = node->minor->dev;
992 	struct drm_i915_private *dev_priv = dev->dev_private;
993 	u16 crstanddelay;
994 	int ret;
995 
996 	ret = mutex_lock_interruptible(&dev->struct_mutex);
997 	if (ret)
998 		return ret;
999 	intel_runtime_pm_get(dev_priv);
1000 
1001 	crstanddelay = I915_READ16(CRSTANDVID);
1002 
1003 	intel_runtime_pm_put(dev_priv);
1004 	mutex_unlock(&dev->struct_mutex);
1005 
1006 	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
1007 
1008 	return 0;
1009 }
1010 
1011 static int i915_frequency_info(struct seq_file *m, void *unused)
1012 {
1013 	struct drm_info_node *node = m->private;
1014 	struct drm_device *dev = node->minor->dev;
1015 	struct drm_i915_private *dev_priv = dev->dev_private;
1016 	int ret = 0;
1017 
1018 	intel_runtime_pm_get(dev_priv);
1019 
1020 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1021 
1022 	if (IS_GEN5(dev)) {
1023 		u16 rgvswctl = I915_READ16(MEMSWCTL);
1024 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1025 
1026 		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1027 		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1028 		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1029 			   MEMSTAT_VID_SHIFT);
1030 		seq_printf(m, "Current P-state: %d\n",
1031 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1032 	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
1033 		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1034 		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1035 		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1036 		u32 rpmodectl, rpinclimit, rpdeclimit;
1037 		u32 rpstat, cagf, reqf;
1038 		u32 rpupei, rpcurup, rpprevup;
1039 		u32 rpdownei, rpcurdown, rpprevdown;
1040 		int max_freq;
1041 
1042 		/* RPSTAT1 is in the GT power well */
1043 		ret = mutex_lock_interruptible(&dev->struct_mutex);
1044 		if (ret)
1045 			goto out;
1046 
1047 		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
1048 
1049 		reqf = I915_READ(GEN6_RPNSWREQ);
1050 		reqf &= ~GEN6_TURBO_DISABLE;
1051 		if (IS_HASWELL(dev))
1052 			reqf >>= 24;
1053 		else
1054 			reqf >>= 25;
1055 		reqf *= GT_FREQUENCY_MULTIPLIER;
1056 
1057 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1058 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1059 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1060 
1061 		rpstat = I915_READ(GEN6_RPSTAT1);
1062 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
1063 		rpcurup = I915_READ(GEN6_RP_CUR_UP);
1064 		rpprevup = I915_READ(GEN6_RP_PREV_UP);
1065 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
1066 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
1067 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
1068 		if (IS_HASWELL(dev))
1069 			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1070 		else
1071 			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
1072 		cagf *= GT_FREQUENCY_MULTIPLIER;
1073 
1074 		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
1075 		mutex_unlock(&dev->struct_mutex);
1076 
1077 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1078 			   I915_READ(GEN6_PMIER),
1079 			   I915_READ(GEN6_PMIMR),
1080 			   I915_READ(GEN6_PMISR),
1081 			   I915_READ(GEN6_PMIIR),
1082 			   I915_READ(GEN6_PMINTRMSK));
1083 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1084 		seq_printf(m, "Render p-state ratio: %d\n",
1085 			   (gt_perf_status & 0xff00) >> 8);
1086 		seq_printf(m, "Render p-state VID: %d\n",
1087 			   gt_perf_status & 0xff);
1088 		seq_printf(m, "Render p-state limit: %d\n",
1089 			   rp_state_limits & 0xff);
1090 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1091 		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1092 		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1093 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1094 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1095 		seq_printf(m, "CAGF: %dMHz\n", cagf);
1096 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
1097 			   GEN6_CURICONT_MASK);
1098 		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
1099 			   GEN6_CURBSYTAVG_MASK);
1100 		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
1101 			   GEN6_CURBSYTAVG_MASK);
1102 		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
1103 			   GEN6_CURIAVG_MASK);
1104 		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
1105 			   GEN6_CURBSYTAVG_MASK);
1106 		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
1107 			   GEN6_CURBSYTAVG_MASK);
1108 
1109 		max_freq = (rp_state_cap & 0xff0000) >> 16;
1110 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1111 			   max_freq * GT_FREQUENCY_MULTIPLIER);
1112 
1113 		max_freq = (rp_state_cap & 0xff00) >> 8;
1114 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1115 			   max_freq * GT_FREQUENCY_MULTIPLIER);
1116 
1117 		max_freq = rp_state_cap & 0xff;
1118 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1119 			   max_freq * GT_FREQUENCY_MULTIPLIER);
1120 
1121 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1122 			   dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
1123 	} else if (IS_VALLEYVIEW(dev)) {
1124 		u32 freq_sts, val;
1125 
1126 		mutex_lock(&dev_priv->rps.hw_lock);
1127 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1128 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1129 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1130 
1131 		val = valleyview_rps_max_freq(dev_priv);
1132 		seq_printf(m, "max GPU freq: %d MHz\n",
1133 			   vlv_gpu_freq(dev_priv, val));
1134 
1135 		val = valleyview_rps_min_freq(dev_priv);
1136 		seq_printf(m, "min GPU freq: %d MHz\n",
1137 			   vlv_gpu_freq(dev_priv, val));
1138 
1139 		seq_printf(m, "current GPU freq: %d MHz\n",
1140 			   vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1141 		mutex_unlock(&dev_priv->rps.hw_lock);
1142 	} else {
1143 		seq_puts(m, "no P-state info available\n");
1144 	}
1145 
1146 out:
1147 	intel_runtime_pm_put(dev_priv);
1148 	return ret;
1149 }
1150 
1151 static int i915_delayfreq_table(struct seq_file *m, void *unused)
1152 {
1153 	struct drm_info_node *node = m->private;
1154 	struct drm_device *dev = node->minor->dev;
1155 	struct drm_i915_private *dev_priv = dev->dev_private;
1156 	u32 delayfreq;
1157 	int ret, i;
1158 
1159 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1160 	if (ret)
1161 		return ret;
1162 	intel_runtime_pm_get(dev_priv);
1163 
1164 	for (i = 0; i < 16; i++) {
1165 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
1166 		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
1167 			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
1168 	}
1169 
1170 	intel_runtime_pm_put(dev_priv);
1171 
1172 	mutex_unlock(&dev->struct_mutex);
1173 
1174 	return 0;
1175 }
1176 
1177 static inline int MAP_TO_MV(int map)
1178 {
1179 	return 1250 - (map * 25);
1180 }
1181 
1182 static int i915_inttoext_table(struct seq_file *m, void *unused)
1183 {
1184 	struct drm_info_node *node = m->private;
1185 	struct drm_device *dev = node->minor->dev;
1186 	struct drm_i915_private *dev_priv = dev->dev_private;
1187 	u32 inttoext;
1188 	int ret, i;
1189 
1190 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1191 	if (ret)
1192 		return ret;
1193 	intel_runtime_pm_get(dev_priv);
1194 
1195 	for (i = 1; i <= 32; i++) {
1196 		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
1197 		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
1198 	}
1199 
1200 	intel_runtime_pm_put(dev_priv);
1201 	mutex_unlock(&dev->struct_mutex);
1202 
1203 	return 0;
1204 }
1205 
1206 static int ironlake_drpc_info(struct seq_file *m)
1207 {
1208 	struct drm_info_node *node = m->private;
1209 	struct drm_device *dev = node->minor->dev;
1210 	struct drm_i915_private *dev_priv = dev->dev_private;
1211 	u32 rgvmodectl, rstdbyctl;
1212 	u16 crstandvid;
1213 	int ret;
1214 
1215 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1216 	if (ret)
1217 		return ret;
1218 	intel_runtime_pm_get(dev_priv);
1219 
1220 	rgvmodectl = I915_READ(MEMMODECTL);
1221 	rstdbyctl = I915_READ(RSTDBYCTL);
1222 	crstandvid = I915_READ16(CRSTANDVID);
1223 
1224 	intel_runtime_pm_put(dev_priv);
1225 	mutex_unlock(&dev->struct_mutex);
1226 
1227 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
1228 		   "yes" : "no");
1229 	seq_printf(m, "Boost freq: %d\n",
1230 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1231 		   MEMMODE_BOOST_FREQ_SHIFT);
1232 	seq_printf(m, "HW control enabled: %s\n",
1233 		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
1234 	seq_printf(m, "SW control enabled: %s\n",
1235 		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
1236 	seq_printf(m, "Gated voltage change: %s\n",
1237 		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
1238 	seq_printf(m, "Starting frequency: P%d\n",
1239 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1240 	seq_printf(m, "Max P-state: P%d\n",
1241 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1242 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1243 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1244 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1245 	seq_printf(m, "Render standby enabled: %s\n",
1246 		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
1247 	seq_puts(m, "Current RS state: ");
1248 	switch (rstdbyctl & RSX_STATUS_MASK) {
1249 	case RSX_STATUS_ON:
1250 		seq_puts(m, "on\n");
1251 		break;
1252 	case RSX_STATUS_RC1:
1253 		seq_puts(m, "RC1\n");
1254 		break;
1255 	case RSX_STATUS_RC1E:
1256 		seq_puts(m, "RC1E\n");
1257 		break;
1258 	case RSX_STATUS_RS1:
1259 		seq_puts(m, "RS1\n");
1260 		break;
1261 	case RSX_STATUS_RS2:
1262 		seq_puts(m, "RS2 (RC6)\n");
1263 		break;
1264 	case RSX_STATUS_RS3:
1265 		seq_puts(m, "RC3 (RC6+)\n");
1266 		break;
1267 	default:
1268 		seq_puts(m, "unknown\n");
1269 		break;
1270 	}
1271 
1272 	return 0;
1273 }
1274 
1275 static int vlv_drpc_info(struct seq_file *m)
1276 {
1277 
1278 	struct drm_info_node *node = m->private;
1279 	struct drm_device *dev = node->minor->dev;
1280 	struct drm_i915_private *dev_priv = dev->dev_private;
1281 	u32 rpmodectl1, rcctl1;
1282 	unsigned fw_rendercount = 0, fw_mediacount = 0;
1283 
1284 	intel_runtime_pm_get(dev_priv);
1285 
1286 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1287 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1288 
1289 	intel_runtime_pm_put(dev_priv);
1290 
1291 	seq_printf(m, "Video Turbo Mode: %s\n",
1292 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1293 	seq_printf(m, "Turbo enabled: %s\n",
1294 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1295 	seq_printf(m, "HW control enabled: %s\n",
1296 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1297 	seq_printf(m, "SW control enabled: %s\n",
1298 		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1299 			  GEN6_RP_MEDIA_SW_MODE));
1300 	seq_printf(m, "RC6 Enabled: %s\n",
1301 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1302 					GEN6_RC_CTL_EI_MODE(1))));
1303 	seq_printf(m, "Render Power Well: %s\n",
1304 			(I915_READ(VLV_GTLC_PW_STATUS) &
1305 				VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1306 	seq_printf(m, "Media Power Well: %s\n",
1307 			(I915_READ(VLV_GTLC_PW_STATUS) &
1308 				VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1309 
1310 	seq_printf(m, "Render RC6 residency since boot: %u\n",
1311 		   I915_READ(VLV_GT_RENDER_RC6));
1312 	seq_printf(m, "Media RC6 residency since boot: %u\n",
1313 		   I915_READ(VLV_GT_MEDIA_RC6));
1314 
1315 	spin_lock_irq(&dev_priv->uncore.lock);
1316 	fw_rendercount = dev_priv->uncore.fw_rendercount;
1317 	fw_mediacount = dev_priv->uncore.fw_mediacount;
1318 	spin_unlock_irq(&dev_priv->uncore.lock);
1319 
1320 	seq_printf(m, "Forcewake Render Count = %u\n", fw_rendercount);
1321 	seq_printf(m, "Forcewake Media Count = %u\n", fw_mediacount);
1322 
1323 
1324 	return 0;
1325 }
1326 
1327 
1328 static int gen6_drpc_info(struct seq_file *m)
1329 {
1330 
1331 	struct drm_info_node *node = m->private;
1332 	struct drm_device *dev = node->minor->dev;
1333 	struct drm_i915_private *dev_priv = dev->dev_private;
1334 	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1335 	unsigned forcewake_count;
1336 	int count = 0, ret;
1337 
1338 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1339 	if (ret)
1340 		return ret;
1341 	intel_runtime_pm_get(dev_priv);
1342 
1343 	spin_lock_irq(&dev_priv->uncore.lock);
1344 	forcewake_count = dev_priv->uncore.forcewake_count;
1345 	spin_unlock_irq(&dev_priv->uncore.lock);
1346 
1347 	if (forcewake_count) {
1348 		seq_puts(m, "RC information inaccurate because somebody "
1349 			    "holds a forcewake reference \n");
1350 	} else {
1351 		/* NB: we cannot use forcewake, else we read the wrong values */
1352 		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1353 			udelay(10);
1354 		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1355 	}
1356 
1357 	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1358 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1359 
1360 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1361 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1362 	mutex_unlock(&dev->struct_mutex);
1363 	mutex_lock(&dev_priv->rps.hw_lock);
1364 	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1365 	mutex_unlock(&dev_priv->rps.hw_lock);
1366 
1367 	intel_runtime_pm_put(dev_priv);
1368 
1369 	seq_printf(m, "Video Turbo Mode: %s\n",
1370 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1371 	seq_printf(m, "HW control enabled: %s\n",
1372 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1373 	seq_printf(m, "SW control enabled: %s\n",
1374 		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1375 			  GEN6_RP_MEDIA_SW_MODE));
1376 	seq_printf(m, "RC1e Enabled: %s\n",
1377 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1378 	seq_printf(m, "RC6 Enabled: %s\n",
1379 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1380 	seq_printf(m, "Deep RC6 Enabled: %s\n",
1381 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1382 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1383 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1384 	seq_puts(m, "Current RC state: ");
1385 	switch (gt_core_status & GEN6_RCn_MASK) {
1386 	case GEN6_RC0:
1387 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1388 			seq_puts(m, "Core Power Down\n");
1389 		else
1390 			seq_puts(m, "on\n");
1391 		break;
1392 	case GEN6_RC3:
1393 		seq_puts(m, "RC3\n");
1394 		break;
1395 	case GEN6_RC6:
1396 		seq_puts(m, "RC6\n");
1397 		break;
1398 	case GEN6_RC7:
1399 		seq_puts(m, "RC7\n");
1400 		break;
1401 	default:
1402 		seq_puts(m, "Unknown\n");
1403 		break;
1404 	}
1405 
1406 	seq_printf(m, "Core Power Down: %s\n",
1407 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1408 
1409 	/* Not exactly sure what this is */
1410 	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1411 		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1412 	seq_printf(m, "RC6 residency since boot: %u\n",
1413 		   I915_READ(GEN6_GT_GFX_RC6));
1414 	seq_printf(m, "RC6+ residency since boot: %u\n",
1415 		   I915_READ(GEN6_GT_GFX_RC6p));
1416 	seq_printf(m, "RC6++ residency since boot: %u\n",
1417 		   I915_READ(GEN6_GT_GFX_RC6pp));
1418 
1419 	seq_printf(m, "RC6   voltage: %dmV\n",
1420 		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1421 	seq_printf(m, "RC6+  voltage: %dmV\n",
1422 		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1423 	seq_printf(m, "RC6++ voltage: %dmV\n",
1424 		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1425 	return 0;
1426 }
1427 
1428 static int i915_drpc_info(struct seq_file *m, void *unused)
1429 {
1430 	struct drm_info_node *node = m->private;
1431 	struct drm_device *dev = node->minor->dev;
1432 
1433 	if (IS_VALLEYVIEW(dev))
1434 		return vlv_drpc_info(m);
1435 	else if (IS_GEN6(dev) || IS_GEN7(dev))
1436 		return gen6_drpc_info(m);
1437 	else
1438 		return ironlake_drpc_info(m);
1439 }
1440 
1441 static int i915_fbc_status(struct seq_file *m, void *unused)
1442 {
1443 	struct drm_info_node *node = m->private;
1444 	struct drm_device *dev = node->minor->dev;
1445 	struct drm_i915_private *dev_priv = dev->dev_private;
1446 
1447 	if (!HAS_FBC(dev)) {
1448 		seq_puts(m, "FBC unsupported on this chipset\n");
1449 		return 0;
1450 	}
1451 
1452 	intel_runtime_pm_get(dev_priv);
1453 
1454 	if (intel_fbc_enabled(dev)) {
1455 		seq_puts(m, "FBC enabled\n");
1456 	} else {
1457 		seq_puts(m, "FBC disabled: ");
1458 		switch (dev_priv->fbc.no_fbc_reason) {
1459 		case FBC_OK:
1460 			seq_puts(m, "FBC actived, but currently disabled in hardware");
1461 			break;
1462 		case FBC_UNSUPPORTED:
1463 			seq_puts(m, "unsupported by this chipset");
1464 			break;
1465 		case FBC_NO_OUTPUT:
1466 			seq_puts(m, "no outputs");
1467 			break;
1468 		case FBC_STOLEN_TOO_SMALL:
1469 			seq_puts(m, "not enough stolen memory");
1470 			break;
1471 		case FBC_UNSUPPORTED_MODE:
1472 			seq_puts(m, "mode not supported");
1473 			break;
1474 		case FBC_MODE_TOO_LARGE:
1475 			seq_puts(m, "mode too large");
1476 			break;
1477 		case FBC_BAD_PLANE:
1478 			seq_puts(m, "FBC unsupported on plane");
1479 			break;
1480 		case FBC_NOT_TILED:
1481 			seq_puts(m, "scanout buffer not tiled");
1482 			break;
1483 		case FBC_MULTIPLE_PIPES:
1484 			seq_puts(m, "multiple pipes are enabled");
1485 			break;
1486 		case FBC_MODULE_PARAM:
1487 			seq_puts(m, "disabled per module param (default off)");
1488 			break;
1489 		case FBC_CHIP_DEFAULT:
1490 			seq_puts(m, "disabled per chip default");
1491 			break;
1492 		default:
1493 			seq_puts(m, "unknown reason");
1494 		}
1495 		seq_putc(m, '\n');
1496 	}
1497 
1498 	intel_runtime_pm_put(dev_priv);
1499 
1500 	return 0;
1501 }
1502 
1503 static int i915_ips_status(struct seq_file *m, void *unused)
1504 {
1505 	struct drm_info_node *node = m->private;
1506 	struct drm_device *dev = node->minor->dev;
1507 	struct drm_i915_private *dev_priv = dev->dev_private;
1508 
1509 	if (!HAS_IPS(dev)) {
1510 		seq_puts(m, "not supported\n");
1511 		return 0;
1512 	}
1513 
1514 	intel_runtime_pm_get(dev_priv);
1515 
1516 	if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
1517 		seq_puts(m, "enabled\n");
1518 	else
1519 		seq_puts(m, "disabled\n");
1520 
1521 	intel_runtime_pm_put(dev_priv);
1522 
1523 	return 0;
1524 }
1525 
1526 static int i915_sr_status(struct seq_file *m, void *unused)
1527 {
1528 	struct drm_info_node *node = m->private;
1529 	struct drm_device *dev = node->minor->dev;
1530 	struct drm_i915_private *dev_priv = dev->dev_private;
1531 	bool sr_enabled = false;
1532 
1533 	intel_runtime_pm_get(dev_priv);
1534 
1535 	if (HAS_PCH_SPLIT(dev))
1536 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1537 	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
1538 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1539 	else if (IS_I915GM(dev))
1540 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1541 	else if (IS_PINEVIEW(dev))
1542 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1543 
1544 	intel_runtime_pm_put(dev_priv);
1545 
1546 	seq_printf(m, "self-refresh: %s\n",
1547 		   sr_enabled ? "enabled" : "disabled");
1548 
1549 	return 0;
1550 }
1551 
1552 static int i915_emon_status(struct seq_file *m, void *unused)
1553 {
1554 	struct drm_info_node *node = m->private;
1555 	struct drm_device *dev = node->minor->dev;
1556 	struct drm_i915_private *dev_priv = dev->dev_private;
1557 	unsigned long temp, chipset, gfx;
1558 	int ret;
1559 
1560 	if (!IS_GEN5(dev))
1561 		return -ENODEV;
1562 
1563 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1564 	if (ret)
1565 		return ret;
1566 
1567 	temp = i915_mch_val(dev_priv);
1568 	chipset = i915_chipset_val(dev_priv);
1569 	gfx = i915_gfx_val(dev_priv);
1570 	mutex_unlock(&dev->struct_mutex);
1571 
1572 	seq_printf(m, "GMCH temp: %ld\n", temp);
1573 	seq_printf(m, "Chipset power: %ld\n", chipset);
1574 	seq_printf(m, "GFX power: %ld\n", gfx);
1575 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1576 
1577 	return 0;
1578 }
1579 
1580 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1581 {
1582 	struct drm_info_node *node = m->private;
1583 	struct drm_device *dev = node->minor->dev;
1584 	struct drm_i915_private *dev_priv = dev->dev_private;
1585 	int ret = 0;
1586 	int gpu_freq, ia_freq;
1587 
1588 	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
1589 		seq_puts(m, "unsupported on this chipset\n");
1590 		return 0;
1591 	}
1592 
1593 	intel_runtime_pm_get(dev_priv);
1594 
1595 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1596 
1597 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1598 	if (ret)
1599 		goto out;
1600 
1601 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1602 
1603 	for (gpu_freq = dev_priv->rps.min_freq_softlimit;
1604 	     gpu_freq <= dev_priv->rps.max_freq_softlimit;
1605 	     gpu_freq++) {
1606 		ia_freq = gpu_freq;
1607 		sandybridge_pcode_read(dev_priv,
1608 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1609 				       &ia_freq);
1610 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1611 			   gpu_freq * GT_FREQUENCY_MULTIPLIER,
1612 			   ((ia_freq >> 0) & 0xff) * 100,
1613 			   ((ia_freq >> 8) & 0xff) * 100);
1614 	}
1615 
1616 	mutex_unlock(&dev_priv->rps.hw_lock);
1617 
1618 out:
1619 	intel_runtime_pm_put(dev_priv);
1620 	return ret;
1621 }
1622 
1623 static int i915_gfxec(struct seq_file *m, void *unused)
1624 {
1625 	struct drm_info_node *node = m->private;
1626 	struct drm_device *dev = node->minor->dev;
1627 	struct drm_i915_private *dev_priv = dev->dev_private;
1628 	int ret;
1629 
1630 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1631 	if (ret)
1632 		return ret;
1633 	intel_runtime_pm_get(dev_priv);
1634 
1635 	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1636 	intel_runtime_pm_put(dev_priv);
1637 
1638 	mutex_unlock(&dev->struct_mutex);
1639 
1640 	return 0;
1641 }
1642 
1643 static int i915_opregion(struct seq_file *m, void *unused)
1644 {
1645 	struct drm_info_node *node = m->private;
1646 	struct drm_device *dev = node->minor->dev;
1647 	struct drm_i915_private *dev_priv = dev->dev_private;
1648 	struct intel_opregion *opregion = &dev_priv->opregion;
1649 	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1650 	int ret;
1651 
1652 	if (data == NULL)
1653 		return -ENOMEM;
1654 
1655 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1656 	if (ret)
1657 		goto out;
1658 
1659 	if (opregion->header) {
1660 		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1661 		seq_write(m, data, OPREGION_SIZE);
1662 	}
1663 
1664 	mutex_unlock(&dev->struct_mutex);
1665 
1666 out:
1667 	kfree(data);
1668 	return 0;
1669 }
1670 
1671 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1672 {
1673 	struct drm_info_node *node = m->private;
1674 	struct drm_device *dev = node->minor->dev;
1675 	struct intel_fbdev *ifbdev = NULL;
1676 	struct intel_framebuffer *fb;
1677 
1678 #ifdef CONFIG_DRM_I915_FBDEV
1679 	struct drm_i915_private *dev_priv = dev->dev_private;
1680 	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1681 	if (ret)
1682 		return ret;
1683 
1684 	ifbdev = dev_priv->fbdev;
1685 	fb = to_intel_framebuffer(ifbdev->helper.fb);
1686 
1687 	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1688 		   fb->base.width,
1689 		   fb->base.height,
1690 		   fb->base.depth,
1691 		   fb->base.bits_per_pixel,
1692 		   atomic_read(&fb->base.refcount.refcount));
1693 	describe_obj(m, fb->obj);
1694 	seq_putc(m, '\n');
1695 	mutex_unlock(&dev->mode_config.mutex);
1696 #endif
1697 
1698 	mutex_lock(&dev->mode_config.fb_lock);
1699 	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1700 		if (ifbdev && &fb->base == ifbdev->helper.fb)
1701 			continue;
1702 
1703 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1704 			   fb->base.width,
1705 			   fb->base.height,
1706 			   fb->base.depth,
1707 			   fb->base.bits_per_pixel,
1708 			   atomic_read(&fb->base.refcount.refcount));
1709 		describe_obj(m, fb->obj);
1710 		seq_putc(m, '\n');
1711 	}
1712 	mutex_unlock(&dev->mode_config.fb_lock);
1713 
1714 	return 0;
1715 }
1716 
1717 static int i915_context_status(struct seq_file *m, void *unused)
1718 {
1719 	struct drm_info_node *node = m->private;
1720 	struct drm_device *dev = node->minor->dev;
1721 	struct drm_i915_private *dev_priv = dev->dev_private;
1722 	struct intel_engine_cs *ring;
1723 	struct intel_context *ctx;
1724 	int ret, i;
1725 
1726 	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1727 	if (ret)
1728 		return ret;
1729 
1730 	if (dev_priv->ips.pwrctx) {
1731 		seq_puts(m, "power context ");
1732 		describe_obj(m, dev_priv->ips.pwrctx);
1733 		seq_putc(m, '\n');
1734 	}
1735 
1736 	if (dev_priv->ips.renderctx) {
1737 		seq_puts(m, "render context ");
1738 		describe_obj(m, dev_priv->ips.renderctx);
1739 		seq_putc(m, '\n');
1740 	}
1741 
1742 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
1743 		if (ctx->obj == NULL)
1744 			continue;
1745 
1746 		seq_puts(m, "HW context ");
1747 		describe_ctx(m, ctx);
1748 		for_each_ring(ring, dev_priv, i)
1749 			if (ring->default_context == ctx)
1750 				seq_printf(m, "(default context %s) ", ring->name);
1751 
1752 		describe_obj(m, ctx->obj);
1753 		seq_putc(m, '\n');
1754 	}
1755 
1756 	mutex_unlock(&dev->mode_config.mutex);
1757 
1758 	return 0;
1759 }
1760 
1761 static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
1762 {
1763 	struct drm_info_node *node = m->private;
1764 	struct drm_device *dev = node->minor->dev;
1765 	struct drm_i915_private *dev_priv = dev->dev_private;
1766 	unsigned forcewake_count = 0, fw_rendercount = 0, fw_mediacount = 0;
1767 
1768 	spin_lock_irq(&dev_priv->uncore.lock);
1769 	if (IS_VALLEYVIEW(dev)) {
1770 		fw_rendercount = dev_priv->uncore.fw_rendercount;
1771 		fw_mediacount = dev_priv->uncore.fw_mediacount;
1772 	} else
1773 		forcewake_count = dev_priv->uncore.forcewake_count;
1774 	spin_unlock_irq(&dev_priv->uncore.lock);
1775 
1776 	if (IS_VALLEYVIEW(dev)) {
1777 		seq_printf(m, "fw_rendercount = %u\n", fw_rendercount);
1778 		seq_printf(m, "fw_mediacount = %u\n", fw_mediacount);
1779 	} else
1780 		seq_printf(m, "forcewake count = %u\n", forcewake_count);
1781 
1782 	return 0;
1783 }
1784 
1785 static const char *swizzle_string(unsigned swizzle)
1786 {
1787 	switch (swizzle) {
1788 	case I915_BIT_6_SWIZZLE_NONE:
1789 		return "none";
1790 	case I915_BIT_6_SWIZZLE_9:
1791 		return "bit9";
1792 	case I915_BIT_6_SWIZZLE_9_10:
1793 		return "bit9/bit10";
1794 	case I915_BIT_6_SWIZZLE_9_11:
1795 		return "bit9/bit11";
1796 	case I915_BIT_6_SWIZZLE_9_10_11:
1797 		return "bit9/bit10/bit11";
1798 	case I915_BIT_6_SWIZZLE_9_17:
1799 		return "bit9/bit17";
1800 	case I915_BIT_6_SWIZZLE_9_10_17:
1801 		return "bit9/bit10/bit17";
1802 	case I915_BIT_6_SWIZZLE_UNKNOWN:
1803 		return "unknown";
1804 	}
1805 
1806 	return "bug";
1807 }
1808 
1809 static int i915_swizzle_info(struct seq_file *m, void *data)
1810 {
1811 	struct drm_info_node *node = m->private;
1812 	struct drm_device *dev = node->minor->dev;
1813 	struct drm_i915_private *dev_priv = dev->dev_private;
1814 	int ret;
1815 
1816 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1817 	if (ret)
1818 		return ret;
1819 	intel_runtime_pm_get(dev_priv);
1820 
1821 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1822 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1823 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1824 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1825 
1826 	if (IS_GEN3(dev) || IS_GEN4(dev)) {
1827 		seq_printf(m, "DDC = 0x%08x\n",
1828 			   I915_READ(DCC));
1829 		seq_printf(m, "C0DRB3 = 0x%04x\n",
1830 			   I915_READ16(C0DRB3));
1831 		seq_printf(m, "C1DRB3 = 0x%04x\n",
1832 			   I915_READ16(C1DRB3));
1833 	} else if (INTEL_INFO(dev)->gen >= 6) {
1834 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1835 			   I915_READ(MAD_DIMM_C0));
1836 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1837 			   I915_READ(MAD_DIMM_C1));
1838 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1839 			   I915_READ(MAD_DIMM_C2));
1840 		seq_printf(m, "TILECTL = 0x%08x\n",
1841 			   I915_READ(TILECTL));
1842 		if (IS_GEN8(dev))
1843 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1844 				   I915_READ(GAMTARBMODE));
1845 		else
1846 			seq_printf(m, "ARB_MODE = 0x%08x\n",
1847 				   I915_READ(ARB_MODE));
1848 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1849 			   I915_READ(DISP_ARB_CTL));
1850 	}
1851 	intel_runtime_pm_put(dev_priv);
1852 	mutex_unlock(&dev->struct_mutex);
1853 
1854 	return 0;
1855 }
1856 
1857 static int per_file_ctx(int id, void *ptr, void *data)
1858 {
1859 	struct intel_context *ctx = ptr;
1860 	struct seq_file *m = data;
1861 	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(ctx);
1862 
1863 	if (i915_gem_context_is_default(ctx))
1864 		seq_puts(m, "  default context:\n");
1865 	else
1866 		seq_printf(m, "  context %d:\n", ctx->id);
1867 	ppgtt->debug_dump(ppgtt, m);
1868 
1869 	return 0;
1870 }
1871 
1872 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
1873 {
1874 	struct drm_i915_private *dev_priv = dev->dev_private;
1875 	struct intel_engine_cs *ring;
1876 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1877 	int unused, i;
1878 
1879 	if (!ppgtt)
1880 		return;
1881 
1882 	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
1883 	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
1884 	for_each_ring(ring, dev_priv, unused) {
1885 		seq_printf(m, "%s\n", ring->name);
1886 		for (i = 0; i < 4; i++) {
1887 			u32 offset = 0x270 + i * 8;
1888 			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
1889 			pdp <<= 32;
1890 			pdp |= I915_READ(ring->mmio_base + offset);
1891 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
1892 		}
1893 	}
1894 }
1895 
1896 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
1897 {
1898 	struct drm_i915_private *dev_priv = dev->dev_private;
1899 	struct intel_engine_cs *ring;
1900 	struct drm_file *file;
1901 	int i;
1902 
1903 	if (INTEL_INFO(dev)->gen == 6)
1904 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
1905 
1906 	for_each_ring(ring, dev_priv, i) {
1907 		seq_printf(m, "%s\n", ring->name);
1908 		if (INTEL_INFO(dev)->gen == 7)
1909 			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
1910 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
1911 		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
1912 		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
1913 	}
1914 	if (dev_priv->mm.aliasing_ppgtt) {
1915 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1916 
1917 		seq_puts(m, "aliasing PPGTT:\n");
1918 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
1919 
1920 		ppgtt->debug_dump(ppgtt, m);
1921 	} else
1922 		return;
1923 
1924 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
1925 		struct drm_i915_file_private *file_priv = file->driver_priv;
1926 
1927 		seq_printf(m, "proc: %s\n",
1928 			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
1929 		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
1930 	}
1931 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
1932 }
1933 
1934 static int i915_ppgtt_info(struct seq_file *m, void *data)
1935 {
1936 	struct drm_info_node *node = m->private;
1937 	struct drm_device *dev = node->minor->dev;
1938 	struct drm_i915_private *dev_priv = dev->dev_private;
1939 
1940 	int ret = mutex_lock_interruptible(&dev->struct_mutex);
1941 	if (ret)
1942 		return ret;
1943 	intel_runtime_pm_get(dev_priv);
1944 
1945 	if (INTEL_INFO(dev)->gen >= 8)
1946 		gen8_ppgtt_info(m, dev);
1947 	else if (INTEL_INFO(dev)->gen >= 6)
1948 		gen6_ppgtt_info(m, dev);
1949 
1950 	intel_runtime_pm_put(dev_priv);
1951 	mutex_unlock(&dev->struct_mutex);
1952 
1953 	return 0;
1954 }
1955 
1956 static int i915_llc(struct seq_file *m, void *data)
1957 {
1958 	struct drm_info_node *node = m->private;
1959 	struct drm_device *dev = node->minor->dev;
1960 	struct drm_i915_private *dev_priv = dev->dev_private;
1961 
1962 	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
1963 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
1964 	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
1965 
1966 	return 0;
1967 }
1968 
1969 static int i915_edp_psr_status(struct seq_file *m, void *data)
1970 {
1971 	struct drm_info_node *node = m->private;
1972 	struct drm_device *dev = node->minor->dev;
1973 	struct drm_i915_private *dev_priv = dev->dev_private;
1974 	u32 psrperf = 0;
1975 	bool enabled = false;
1976 
1977 	intel_runtime_pm_get(dev_priv);
1978 
1979 	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
1980 	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
1981 
1982 	enabled = HAS_PSR(dev) &&
1983 		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1984 	seq_printf(m, "Enabled: %s\n", yesno(enabled));
1985 
1986 	if (HAS_PSR(dev))
1987 		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
1988 			EDP_PSR_PERF_CNT_MASK;
1989 	seq_printf(m, "Performance_Counter: %u\n", psrperf);
1990 
1991 	intel_runtime_pm_put(dev_priv);
1992 	return 0;
1993 }
1994 
1995 static int i915_sink_crc(struct seq_file *m, void *data)
1996 {
1997 	struct drm_info_node *node = m->private;
1998 	struct drm_device *dev = node->minor->dev;
1999 	struct intel_encoder *encoder;
2000 	struct intel_connector *connector;
2001 	struct intel_dp *intel_dp = NULL;
2002 	int ret;
2003 	u8 crc[6];
2004 
2005 	drm_modeset_lock_all(dev);
2006 	list_for_each_entry(connector, &dev->mode_config.connector_list,
2007 			    base.head) {
2008 
2009 		if (connector->base.dpms != DRM_MODE_DPMS_ON)
2010 			continue;
2011 
2012 		if (!connector->base.encoder)
2013 			continue;
2014 
2015 		encoder = to_intel_encoder(connector->base.encoder);
2016 		if (encoder->type != INTEL_OUTPUT_EDP)
2017 			continue;
2018 
2019 		intel_dp = enc_to_intel_dp(&encoder->base);
2020 
2021 		ret = intel_dp_sink_crc(intel_dp, crc);
2022 		if (ret)
2023 			goto out;
2024 
2025 		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2026 			   crc[0], crc[1], crc[2],
2027 			   crc[3], crc[4], crc[5]);
2028 		goto out;
2029 	}
2030 	ret = -ENODEV;
2031 out:
2032 	drm_modeset_unlock_all(dev);
2033 	return ret;
2034 }
2035 
2036 static int i915_energy_uJ(struct seq_file *m, void *data)
2037 {
2038 	struct drm_info_node *node = m->private;
2039 	struct drm_device *dev = node->minor->dev;
2040 	struct drm_i915_private *dev_priv = dev->dev_private;
2041 	u64 power;
2042 	u32 units;
2043 
2044 	if (INTEL_INFO(dev)->gen < 6)
2045 		return -ENODEV;
2046 
2047 	intel_runtime_pm_get(dev_priv);
2048 
2049 	rdmsrl(MSR_RAPL_POWER_UNIT, power);
2050 	power = (power & 0x1f00) >> 8;
2051 	units = 1000000 / (1 << power); /* convert to uJ */
2052 	power = I915_READ(MCH_SECP_NRG_STTS);
2053 	power *= units;
2054 
2055 	intel_runtime_pm_put(dev_priv);
2056 
2057 	seq_printf(m, "%llu", (long long unsigned)power);
2058 
2059 	return 0;
2060 }
2061 
2062 static int i915_pc8_status(struct seq_file *m, void *unused)
2063 {
2064 	struct drm_info_node *node = m->private;
2065 	struct drm_device *dev = node->minor->dev;
2066 	struct drm_i915_private *dev_priv = dev->dev_private;
2067 
2068 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2069 		seq_puts(m, "not supported\n");
2070 		return 0;
2071 	}
2072 
2073 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
2074 	seq_printf(m, "IRQs disabled: %s\n",
2075 		   yesno(dev_priv->pm.irqs_disabled));
2076 
2077 	return 0;
2078 }
2079 
2080 static const char *power_domain_str(enum intel_display_power_domain domain)
2081 {
2082 	switch (domain) {
2083 	case POWER_DOMAIN_PIPE_A:
2084 		return "PIPE_A";
2085 	case POWER_DOMAIN_PIPE_B:
2086 		return "PIPE_B";
2087 	case POWER_DOMAIN_PIPE_C:
2088 		return "PIPE_C";
2089 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
2090 		return "PIPE_A_PANEL_FITTER";
2091 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
2092 		return "PIPE_B_PANEL_FITTER";
2093 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
2094 		return "PIPE_C_PANEL_FITTER";
2095 	case POWER_DOMAIN_TRANSCODER_A:
2096 		return "TRANSCODER_A";
2097 	case POWER_DOMAIN_TRANSCODER_B:
2098 		return "TRANSCODER_B";
2099 	case POWER_DOMAIN_TRANSCODER_C:
2100 		return "TRANSCODER_C";
2101 	case POWER_DOMAIN_TRANSCODER_EDP:
2102 		return "TRANSCODER_EDP";
2103 	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
2104 		return "PORT_DDI_A_2_LANES";
2105 	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
2106 		return "PORT_DDI_A_4_LANES";
2107 	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
2108 		return "PORT_DDI_B_2_LANES";
2109 	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
2110 		return "PORT_DDI_B_4_LANES";
2111 	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
2112 		return "PORT_DDI_C_2_LANES";
2113 	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
2114 		return "PORT_DDI_C_4_LANES";
2115 	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
2116 		return "PORT_DDI_D_2_LANES";
2117 	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
2118 		return "PORT_DDI_D_4_LANES";
2119 	case POWER_DOMAIN_PORT_DSI:
2120 		return "PORT_DSI";
2121 	case POWER_DOMAIN_PORT_CRT:
2122 		return "PORT_CRT";
2123 	case POWER_DOMAIN_PORT_OTHER:
2124 		return "PORT_OTHER";
2125 	case POWER_DOMAIN_VGA:
2126 		return "VGA";
2127 	case POWER_DOMAIN_AUDIO:
2128 		return "AUDIO";
2129 	case POWER_DOMAIN_INIT:
2130 		return "INIT";
2131 	default:
2132 		WARN_ON(1);
2133 		return "?";
2134 	}
2135 }
2136 
2137 static int i915_power_domain_info(struct seq_file *m, void *unused)
2138 {
2139 	struct drm_info_node *node = m->private;
2140 	struct drm_device *dev = node->minor->dev;
2141 	struct drm_i915_private *dev_priv = dev->dev_private;
2142 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2143 	int i;
2144 
2145 	mutex_lock(&power_domains->lock);
2146 
2147 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2148 	for (i = 0; i < power_domains->power_well_count; i++) {
2149 		struct i915_power_well *power_well;
2150 		enum intel_display_power_domain power_domain;
2151 
2152 		power_well = &power_domains->power_wells[i];
2153 		seq_printf(m, "%-25s %d\n", power_well->name,
2154 			   power_well->count);
2155 
2156 		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2157 		     power_domain++) {
2158 			if (!(BIT(power_domain) & power_well->domains))
2159 				continue;
2160 
2161 			seq_printf(m, "  %-23s %d\n",
2162 				 power_domain_str(power_domain),
2163 				 power_domains->domain_use_count[power_domain]);
2164 		}
2165 	}
2166 
2167 	mutex_unlock(&power_domains->lock);
2168 
2169 	return 0;
2170 }
2171 
2172 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2173 				 struct drm_display_mode *mode)
2174 {
2175 	int i;
2176 
2177 	for (i = 0; i < tabs; i++)
2178 		seq_putc(m, '\t');
2179 
2180 	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2181 		   mode->base.id, mode->name,
2182 		   mode->vrefresh, mode->clock,
2183 		   mode->hdisplay, mode->hsync_start,
2184 		   mode->hsync_end, mode->htotal,
2185 		   mode->vdisplay, mode->vsync_start,
2186 		   mode->vsync_end, mode->vtotal,
2187 		   mode->type, mode->flags);
2188 }
2189 
2190 static void intel_encoder_info(struct seq_file *m,
2191 			       struct intel_crtc *intel_crtc,
2192 			       struct intel_encoder *intel_encoder)
2193 {
2194 	struct drm_info_node *node = m->private;
2195 	struct drm_device *dev = node->minor->dev;
2196 	struct drm_crtc *crtc = &intel_crtc->base;
2197 	struct intel_connector *intel_connector;
2198 	struct drm_encoder *encoder;
2199 
2200 	encoder = &intel_encoder->base;
2201 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2202 		   encoder->base.id, encoder->name);
2203 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2204 		struct drm_connector *connector = &intel_connector->base;
2205 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2206 			   connector->base.id,
2207 			   connector->name,
2208 			   drm_get_connector_status_name(connector->status));
2209 		if (connector->status == connector_status_connected) {
2210 			struct drm_display_mode *mode = &crtc->mode;
2211 			seq_printf(m, ", mode:\n");
2212 			intel_seq_print_mode(m, 2, mode);
2213 		} else {
2214 			seq_putc(m, '\n');
2215 		}
2216 	}
2217 }
2218 
2219 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2220 {
2221 	struct drm_info_node *node = m->private;
2222 	struct drm_device *dev = node->minor->dev;
2223 	struct drm_crtc *crtc = &intel_crtc->base;
2224 	struct intel_encoder *intel_encoder;
2225 
2226 	seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2227 		   crtc->primary->fb->base.id, crtc->x, crtc->y,
2228 		   crtc->primary->fb->width, crtc->primary->fb->height);
2229 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2230 		intel_encoder_info(m, intel_crtc, intel_encoder);
2231 }
2232 
2233 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2234 {
2235 	struct drm_display_mode *mode = panel->fixed_mode;
2236 
2237 	seq_printf(m, "\tfixed mode:\n");
2238 	intel_seq_print_mode(m, 2, mode);
2239 }
2240 
2241 static void intel_dp_info(struct seq_file *m,
2242 			  struct intel_connector *intel_connector)
2243 {
2244 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2245 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2246 
2247 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2248 	seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
2249 		   "no");
2250 	if (intel_encoder->type == INTEL_OUTPUT_EDP)
2251 		intel_panel_info(m, &intel_connector->panel);
2252 }
2253 
2254 static void intel_hdmi_info(struct seq_file *m,
2255 			    struct intel_connector *intel_connector)
2256 {
2257 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2258 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2259 
2260 	seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
2261 		   "no");
2262 }
2263 
2264 static void intel_lvds_info(struct seq_file *m,
2265 			    struct intel_connector *intel_connector)
2266 {
2267 	intel_panel_info(m, &intel_connector->panel);
2268 }
2269 
2270 static void intel_connector_info(struct seq_file *m,
2271 				 struct drm_connector *connector)
2272 {
2273 	struct intel_connector *intel_connector = to_intel_connector(connector);
2274 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2275 	struct drm_display_mode *mode;
2276 
2277 	seq_printf(m, "connector %d: type %s, status: %s\n",
2278 		   connector->base.id, connector->name,
2279 		   drm_get_connector_status_name(connector->status));
2280 	if (connector->status == connector_status_connected) {
2281 		seq_printf(m, "\tname: %s\n", connector->display_info.name);
2282 		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2283 			   connector->display_info.width_mm,
2284 			   connector->display_info.height_mm);
2285 		seq_printf(m, "\tsubpixel order: %s\n",
2286 			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2287 		seq_printf(m, "\tCEA rev: %d\n",
2288 			   connector->display_info.cea_rev);
2289 	}
2290 	if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2291 	    intel_encoder->type == INTEL_OUTPUT_EDP)
2292 		intel_dp_info(m, intel_connector);
2293 	else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2294 		intel_hdmi_info(m, intel_connector);
2295 	else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2296 		intel_lvds_info(m, intel_connector);
2297 
2298 	seq_printf(m, "\tmodes:\n");
2299 	list_for_each_entry(mode, &connector->modes, head)
2300 		intel_seq_print_mode(m, 2, mode);
2301 }
2302 
2303 static bool cursor_active(struct drm_device *dev, int pipe)
2304 {
2305 	struct drm_i915_private *dev_priv = dev->dev_private;
2306 	u32 state;
2307 
2308 	if (IS_845G(dev) || IS_I865G(dev))
2309 		state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
2310 	else
2311 		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2312 
2313 	return state;
2314 }
2315 
2316 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2317 {
2318 	struct drm_i915_private *dev_priv = dev->dev_private;
2319 	u32 pos;
2320 
2321 	pos = I915_READ(CURPOS(pipe));
2322 
2323 	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2324 	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2325 		*x = -*x;
2326 
2327 	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2328 	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2329 		*y = -*y;
2330 
2331 	return cursor_active(dev, pipe);
2332 }
2333 
2334 static int i915_display_info(struct seq_file *m, void *unused)
2335 {
2336 	struct drm_info_node *node = m->private;
2337 	struct drm_device *dev = node->minor->dev;
2338 	struct drm_i915_private *dev_priv = dev->dev_private;
2339 	struct intel_crtc *crtc;
2340 	struct drm_connector *connector;
2341 
2342 	intel_runtime_pm_get(dev_priv);
2343 	drm_modeset_lock_all(dev);
2344 	seq_printf(m, "CRTC info\n");
2345 	seq_printf(m, "---------\n");
2346 	for_each_intel_crtc(dev, crtc) {
2347 		bool active;
2348 		int x, y;
2349 
2350 		seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
2351 			   crtc->base.base.id, pipe_name(crtc->pipe),
2352 			   yesno(crtc->active));
2353 		if (crtc->active) {
2354 			intel_crtc_info(m, crtc);
2355 
2356 			active = cursor_position(dev, crtc->pipe, &x, &y);
2357 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
2358 				   yesno(crtc->cursor_base),
2359 				   x, y, crtc->cursor_addr,
2360 				   yesno(active));
2361 		}
2362 
2363 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2364 			   yesno(!crtc->cpu_fifo_underrun_disabled),
2365 			   yesno(!crtc->pch_fifo_underrun_disabled));
2366 	}
2367 
2368 	seq_printf(m, "\n");
2369 	seq_printf(m, "Connector info\n");
2370 	seq_printf(m, "--------------\n");
2371 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2372 		intel_connector_info(m, connector);
2373 	}
2374 	drm_modeset_unlock_all(dev);
2375 	intel_runtime_pm_put(dev_priv);
2376 
2377 	return 0;
2378 }
2379 
2380 struct pipe_crc_info {
2381 	const char *name;
2382 	struct drm_device *dev;
2383 	enum pipe pipe;
2384 };
2385 
2386 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
2387 {
2388 	struct pipe_crc_info *info = inode->i_private;
2389 	struct drm_i915_private *dev_priv = info->dev->dev_private;
2390 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2391 
2392 	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
2393 		return -ENODEV;
2394 
2395 	spin_lock_irq(&pipe_crc->lock);
2396 
2397 	if (pipe_crc->opened) {
2398 		spin_unlock_irq(&pipe_crc->lock);
2399 		return -EBUSY; /* already open */
2400 	}
2401 
2402 	pipe_crc->opened = true;
2403 	filep->private_data = inode->i_private;
2404 
2405 	spin_unlock_irq(&pipe_crc->lock);
2406 
2407 	return 0;
2408 }
2409 
2410 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
2411 {
2412 	struct pipe_crc_info *info = inode->i_private;
2413 	struct drm_i915_private *dev_priv = info->dev->dev_private;
2414 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2415 
2416 	spin_lock_irq(&pipe_crc->lock);
2417 	pipe_crc->opened = false;
2418 	spin_unlock_irq(&pipe_crc->lock);
2419 
2420 	return 0;
2421 }
2422 
2423 /* (6 fields, 8 chars each, space separated (5) + '\n') */
2424 #define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
2425 /* account for \'0' */
2426 #define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
2427 
2428 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
2429 {
2430 	assert_spin_locked(&pipe_crc->lock);
2431 	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
2432 			INTEL_PIPE_CRC_ENTRIES_NR);
2433 }
2434 
2435 static ssize_t
2436 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
2437 		   loff_t *pos)
2438 {
2439 	struct pipe_crc_info *info = filep->private_data;
2440 	struct drm_device *dev = info->dev;
2441 	struct drm_i915_private *dev_priv = dev->dev_private;
2442 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
2443 	char buf[PIPE_CRC_BUFFER_LEN];
2444 	int head, tail, n_entries, n;
2445 	ssize_t bytes_read;
2446 
2447 	/*
2448 	 * Don't allow user space to provide buffers not big enough to hold
2449 	 * a line of data.
2450 	 */
2451 	if (count < PIPE_CRC_LINE_LEN)
2452 		return -EINVAL;
2453 
2454 	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
2455 		return 0;
2456 
2457 	/* nothing to read */
2458 	spin_lock_irq(&pipe_crc->lock);
2459 	while (pipe_crc_data_count(pipe_crc) == 0) {
2460 		int ret;
2461 
2462 		if (filep->f_flags & O_NONBLOCK) {
2463 			spin_unlock_irq(&pipe_crc->lock);
2464 			return -EAGAIN;
2465 		}
2466 
2467 		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
2468 				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
2469 		if (ret) {
2470 			spin_unlock_irq(&pipe_crc->lock);
2471 			return ret;
2472 		}
2473 	}
2474 
2475 	/* We now have one or more entries to read */
2476 	head = pipe_crc->head;
2477 	tail = pipe_crc->tail;
2478 	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
2479 			count / PIPE_CRC_LINE_LEN);
2480 	spin_unlock_irq(&pipe_crc->lock);
2481 
2482 	bytes_read = 0;
2483 	n = 0;
2484 	do {
2485 		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
2486 		int ret;
2487 
2488 		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
2489 				       "%8u %8x %8x %8x %8x %8x\n",
2490 				       entry->frame, entry->crc[0],
2491 				       entry->crc[1], entry->crc[2],
2492 				       entry->crc[3], entry->crc[4]);
2493 
2494 		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
2495 				   buf, PIPE_CRC_LINE_LEN);
2496 		if (ret == PIPE_CRC_LINE_LEN)
2497 			return -EFAULT;
2498 
2499 		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
2500 		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
2501 		n++;
2502 	} while (--n_entries);
2503 
2504 	spin_lock_irq(&pipe_crc->lock);
2505 	pipe_crc->tail = tail;
2506 	spin_unlock_irq(&pipe_crc->lock);
2507 
2508 	return bytes_read;
2509 }
2510 
2511 static const struct file_operations i915_pipe_crc_fops = {
2512 	.owner = THIS_MODULE,
2513 	.open = i915_pipe_crc_open,
2514 	.read = i915_pipe_crc_read,
2515 	.release = i915_pipe_crc_release,
2516 };
2517 
2518 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
2519 	{
2520 		.name = "i915_pipe_A_crc",
2521 		.pipe = PIPE_A,
2522 	},
2523 	{
2524 		.name = "i915_pipe_B_crc",
2525 		.pipe = PIPE_B,
2526 	},
2527 	{
2528 		.name = "i915_pipe_C_crc",
2529 		.pipe = PIPE_C,
2530 	},
2531 };
2532 
2533 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
2534 				enum pipe pipe)
2535 {
2536 	struct drm_device *dev = minor->dev;
2537 	struct dentry *ent;
2538 	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
2539 
2540 	info->dev = dev;
2541 	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
2542 				  &i915_pipe_crc_fops);
2543 	if (!ent)
2544 		return -ENOMEM;
2545 
2546 	return drm_add_fake_info_node(minor, ent, info);
2547 }
2548 
2549 static const char * const pipe_crc_sources[] = {
2550 	"none",
2551 	"plane1",
2552 	"plane2",
2553 	"pf",
2554 	"pipe",
2555 	"TV",
2556 	"DP-B",
2557 	"DP-C",
2558 	"DP-D",
2559 	"auto",
2560 };
2561 
2562 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
2563 {
2564 	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
2565 	return pipe_crc_sources[source];
2566 }
2567 
2568 static int display_crc_ctl_show(struct seq_file *m, void *data)
2569 {
2570 	struct drm_device *dev = m->private;
2571 	struct drm_i915_private *dev_priv = dev->dev_private;
2572 	int i;
2573 
2574 	for (i = 0; i < I915_MAX_PIPES; i++)
2575 		seq_printf(m, "%c %s\n", pipe_name(i),
2576 			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
2577 
2578 	return 0;
2579 }
2580 
2581 static int display_crc_ctl_open(struct inode *inode, struct file *file)
2582 {
2583 	struct drm_device *dev = inode->i_private;
2584 
2585 	return single_open(file, display_crc_ctl_show, dev);
2586 }
2587 
2588 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2589 				 uint32_t *val)
2590 {
2591 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2592 		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
2593 
2594 	switch (*source) {
2595 	case INTEL_PIPE_CRC_SOURCE_PIPE:
2596 		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
2597 		break;
2598 	case INTEL_PIPE_CRC_SOURCE_NONE:
2599 		*val = 0;
2600 		break;
2601 	default:
2602 		return -EINVAL;
2603 	}
2604 
2605 	return 0;
2606 }
2607 
2608 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
2609 				     enum intel_pipe_crc_source *source)
2610 {
2611 	struct intel_encoder *encoder;
2612 	struct intel_crtc *crtc;
2613 	struct intel_digital_port *dig_port;
2614 	int ret = 0;
2615 
2616 	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
2617 
2618 	drm_modeset_lock_all(dev);
2619 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2620 			    base.head) {
2621 		if (!encoder->base.crtc)
2622 			continue;
2623 
2624 		crtc = to_intel_crtc(encoder->base.crtc);
2625 
2626 		if (crtc->pipe != pipe)
2627 			continue;
2628 
2629 		switch (encoder->type) {
2630 		case INTEL_OUTPUT_TVOUT:
2631 			*source = INTEL_PIPE_CRC_SOURCE_TV;
2632 			break;
2633 		case INTEL_OUTPUT_DISPLAYPORT:
2634 		case INTEL_OUTPUT_EDP:
2635 			dig_port = enc_to_dig_port(&encoder->base);
2636 			switch (dig_port->port) {
2637 			case PORT_B:
2638 				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
2639 				break;
2640 			case PORT_C:
2641 				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
2642 				break;
2643 			case PORT_D:
2644 				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
2645 				break;
2646 			default:
2647 				WARN(1, "nonexisting DP port %c\n",
2648 				     port_name(dig_port->port));
2649 				break;
2650 			}
2651 			break;
2652 		}
2653 	}
2654 	drm_modeset_unlock_all(dev);
2655 
2656 	return ret;
2657 }
2658 
2659 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
2660 				enum pipe pipe,
2661 				enum intel_pipe_crc_source *source,
2662 				uint32_t *val)
2663 {
2664 	struct drm_i915_private *dev_priv = dev->dev_private;
2665 	bool need_stable_symbols = false;
2666 
2667 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2668 		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2669 		if (ret)
2670 			return ret;
2671 	}
2672 
2673 	switch (*source) {
2674 	case INTEL_PIPE_CRC_SOURCE_PIPE:
2675 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
2676 		break;
2677 	case INTEL_PIPE_CRC_SOURCE_DP_B:
2678 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
2679 		need_stable_symbols = true;
2680 		break;
2681 	case INTEL_PIPE_CRC_SOURCE_DP_C:
2682 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
2683 		need_stable_symbols = true;
2684 		break;
2685 	case INTEL_PIPE_CRC_SOURCE_NONE:
2686 		*val = 0;
2687 		break;
2688 	default:
2689 		return -EINVAL;
2690 	}
2691 
2692 	/*
2693 	 * When the pipe CRC tap point is after the transcoders we need
2694 	 * to tweak symbol-level features to produce a deterministic series of
2695 	 * symbols for a given frame. We need to reset those features only once
2696 	 * a frame (instead of every nth symbol):
2697 	 *   - DC-balance: used to ensure a better clock recovery from the data
2698 	 *     link (SDVO)
2699 	 *   - DisplayPort scrambling: used for EMI reduction
2700 	 */
2701 	if (need_stable_symbols) {
2702 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2703 
2704 		tmp |= DC_BALANCE_RESET_VLV;
2705 		if (pipe == PIPE_A)
2706 			tmp |= PIPE_A_SCRAMBLE_RESET;
2707 		else
2708 			tmp |= PIPE_B_SCRAMBLE_RESET;
2709 
2710 		I915_WRITE(PORT_DFT2_G4X, tmp);
2711 	}
2712 
2713 	return 0;
2714 }
2715 
2716 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
2717 				 enum pipe pipe,
2718 				 enum intel_pipe_crc_source *source,
2719 				 uint32_t *val)
2720 {
2721 	struct drm_i915_private *dev_priv = dev->dev_private;
2722 	bool need_stable_symbols = false;
2723 
2724 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
2725 		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
2726 		if (ret)
2727 			return ret;
2728 	}
2729 
2730 	switch (*source) {
2731 	case INTEL_PIPE_CRC_SOURCE_PIPE:
2732 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
2733 		break;
2734 	case INTEL_PIPE_CRC_SOURCE_TV:
2735 		if (!SUPPORTS_TV(dev))
2736 			return -EINVAL;
2737 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
2738 		break;
2739 	case INTEL_PIPE_CRC_SOURCE_DP_B:
2740 		if (!IS_G4X(dev))
2741 			return -EINVAL;
2742 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
2743 		need_stable_symbols = true;
2744 		break;
2745 	case INTEL_PIPE_CRC_SOURCE_DP_C:
2746 		if (!IS_G4X(dev))
2747 			return -EINVAL;
2748 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
2749 		need_stable_symbols = true;
2750 		break;
2751 	case INTEL_PIPE_CRC_SOURCE_DP_D:
2752 		if (!IS_G4X(dev))
2753 			return -EINVAL;
2754 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
2755 		need_stable_symbols = true;
2756 		break;
2757 	case INTEL_PIPE_CRC_SOURCE_NONE:
2758 		*val = 0;
2759 		break;
2760 	default:
2761 		return -EINVAL;
2762 	}
2763 
2764 	/*
2765 	 * When the pipe CRC tap point is after the transcoders we need
2766 	 * to tweak symbol-level features to produce a deterministic series of
2767 	 * symbols for a given frame. We need to reset those features only once
2768 	 * a frame (instead of every nth symbol):
2769 	 *   - DC-balance: used to ensure a better clock recovery from the data
2770 	 *     link (SDVO)
2771 	 *   - DisplayPort scrambling: used for EMI reduction
2772 	 */
2773 	if (need_stable_symbols) {
2774 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2775 
2776 		WARN_ON(!IS_G4X(dev));
2777 
2778 		I915_WRITE(PORT_DFT_I9XX,
2779 			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
2780 
2781 		if (pipe == PIPE_A)
2782 			tmp |= PIPE_A_SCRAMBLE_RESET;
2783 		else
2784 			tmp |= PIPE_B_SCRAMBLE_RESET;
2785 
2786 		I915_WRITE(PORT_DFT2_G4X, tmp);
2787 	}
2788 
2789 	return 0;
2790 }
2791 
2792 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
2793 					 enum pipe pipe)
2794 {
2795 	struct drm_i915_private *dev_priv = dev->dev_private;
2796 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2797 
2798 	if (pipe == PIPE_A)
2799 		tmp &= ~PIPE_A_SCRAMBLE_RESET;
2800 	else
2801 		tmp &= ~PIPE_B_SCRAMBLE_RESET;
2802 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
2803 		tmp &= ~DC_BALANCE_RESET_VLV;
2804 	I915_WRITE(PORT_DFT2_G4X, tmp);
2805 
2806 }
2807 
2808 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
2809 					 enum pipe pipe)
2810 {
2811 	struct drm_i915_private *dev_priv = dev->dev_private;
2812 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
2813 
2814 	if (pipe == PIPE_A)
2815 		tmp &= ~PIPE_A_SCRAMBLE_RESET;
2816 	else
2817 		tmp &= ~PIPE_B_SCRAMBLE_RESET;
2818 	I915_WRITE(PORT_DFT2_G4X, tmp);
2819 
2820 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
2821 		I915_WRITE(PORT_DFT_I9XX,
2822 			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
2823 	}
2824 }
2825 
2826 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2827 				uint32_t *val)
2828 {
2829 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2830 		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
2831 
2832 	switch (*source) {
2833 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
2834 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
2835 		break;
2836 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
2837 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
2838 		break;
2839 	case INTEL_PIPE_CRC_SOURCE_PIPE:
2840 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
2841 		break;
2842 	case INTEL_PIPE_CRC_SOURCE_NONE:
2843 		*val = 0;
2844 		break;
2845 	default:
2846 		return -EINVAL;
2847 	}
2848 
2849 	return 0;
2850 }
2851 
2852 static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
2853 				uint32_t *val)
2854 {
2855 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
2856 		*source = INTEL_PIPE_CRC_SOURCE_PF;
2857 
2858 	switch (*source) {
2859 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
2860 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
2861 		break;
2862 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
2863 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
2864 		break;
2865 	case INTEL_PIPE_CRC_SOURCE_PF:
2866 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
2867 		break;
2868 	case INTEL_PIPE_CRC_SOURCE_NONE:
2869 		*val = 0;
2870 		break;
2871 	default:
2872 		return -EINVAL;
2873 	}
2874 
2875 	return 0;
2876 }
2877 
2878 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
2879 			       enum intel_pipe_crc_source source)
2880 {
2881 	struct drm_i915_private *dev_priv = dev->dev_private;
2882 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
2883 	u32 val = 0; /* shut up gcc */
2884 	int ret;
2885 
2886 	if (pipe_crc->source == source)
2887 		return 0;
2888 
2889 	/* forbid changing the source without going back to 'none' */
2890 	if (pipe_crc->source && source)
2891 		return -EINVAL;
2892 
2893 	if (IS_GEN2(dev))
2894 		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
2895 	else if (INTEL_INFO(dev)->gen < 5)
2896 		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
2897 	else if (IS_VALLEYVIEW(dev))
2898 		ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val);
2899 	else if (IS_GEN5(dev) || IS_GEN6(dev))
2900 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
2901 	else
2902 		ret = ivb_pipe_crc_ctl_reg(&source, &val);
2903 
2904 	if (ret != 0)
2905 		return ret;
2906 
2907 	/* none -> real source transition */
2908 	if (source) {
2909 		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
2910 				 pipe_name(pipe), pipe_crc_source_name(source));
2911 
2912 		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
2913 					    INTEL_PIPE_CRC_ENTRIES_NR,
2914 					    GFP_KERNEL);
2915 		if (!pipe_crc->entries)
2916 			return -ENOMEM;
2917 
2918 		spin_lock_irq(&pipe_crc->lock);
2919 		pipe_crc->head = 0;
2920 		pipe_crc->tail = 0;
2921 		spin_unlock_irq(&pipe_crc->lock);
2922 	}
2923 
2924 	pipe_crc->source = source;
2925 
2926 	I915_WRITE(PIPE_CRC_CTL(pipe), val);
2927 	POSTING_READ(PIPE_CRC_CTL(pipe));
2928 
2929 	/* real source -> none transition */
2930 	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
2931 		struct intel_pipe_crc_entry *entries;
2932 
2933 		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
2934 				 pipe_name(pipe));
2935 
2936 		intel_wait_for_vblank(dev, pipe);
2937 
2938 		spin_lock_irq(&pipe_crc->lock);
2939 		entries = pipe_crc->entries;
2940 		pipe_crc->entries = NULL;
2941 		spin_unlock_irq(&pipe_crc->lock);
2942 
2943 		kfree(entries);
2944 
2945 		if (IS_G4X(dev))
2946 			g4x_undo_pipe_scramble_reset(dev, pipe);
2947 		else if (IS_VALLEYVIEW(dev))
2948 			vlv_undo_pipe_scramble_reset(dev, pipe);
2949 	}
2950 
2951 	return 0;
2952 }
2953 
2954 /*
2955  * Parse pipe CRC command strings:
2956  *   command: wsp* object wsp+ name wsp+ source wsp*
2957  *   object: 'pipe'
2958  *   name: (A | B | C)
2959  *   source: (none | plane1 | plane2 | pf)
2960  *   wsp: (#0x20 | #0x9 | #0xA)+
2961  *
2962  * eg.:
2963  *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
2964  *  "pipe A none"    ->  Stop CRC
2965  */
2966 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
2967 {
2968 	int n_words = 0;
2969 
2970 	while (*buf) {
2971 		char *end;
2972 
2973 		/* skip leading white space */
2974 		buf = skip_spaces(buf);
2975 		if (!*buf)
2976 			break;	/* end of buffer */
2977 
2978 		/* find end of word */
2979 		for (end = buf; *end && !isspace(*end); end++)
2980 			;
2981 
2982 		if (n_words == max_words) {
2983 			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
2984 					 max_words);
2985 			return -EINVAL;	/* ran out of words[] before bytes */
2986 		}
2987 
2988 		if (*end)
2989 			*end++ = '\0';
2990 		words[n_words++] = buf;
2991 		buf = end;
2992 	}
2993 
2994 	return n_words;
2995 }
2996 
2997 enum intel_pipe_crc_object {
2998 	PIPE_CRC_OBJECT_PIPE,
2999 };
3000 
3001 static const char * const pipe_crc_objects[] = {
3002 	"pipe",
3003 };
3004 
3005 static int
3006 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
3007 {
3008 	int i;
3009 
3010 	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
3011 		if (!strcmp(buf, pipe_crc_objects[i])) {
3012 			*o = i;
3013 			return 0;
3014 		    }
3015 
3016 	return -EINVAL;
3017 }
3018 
3019 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
3020 {
3021 	const char name = buf[0];
3022 
3023 	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
3024 		return -EINVAL;
3025 
3026 	*pipe = name - 'A';
3027 
3028 	return 0;
3029 }
3030 
3031 static int
3032 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
3033 {
3034 	int i;
3035 
3036 	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
3037 		if (!strcmp(buf, pipe_crc_sources[i])) {
3038 			*s = i;
3039 			return 0;
3040 		    }
3041 
3042 	return -EINVAL;
3043 }
3044 
3045 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
3046 {
3047 #define N_WORDS 3
3048 	int n_words;
3049 	char *words[N_WORDS];
3050 	enum pipe pipe;
3051 	enum intel_pipe_crc_object object;
3052 	enum intel_pipe_crc_source source;
3053 
3054 	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
3055 	if (n_words != N_WORDS) {
3056 		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
3057 				 N_WORDS);
3058 		return -EINVAL;
3059 	}
3060 
3061 	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
3062 		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
3063 		return -EINVAL;
3064 	}
3065 
3066 	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
3067 		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
3068 		return -EINVAL;
3069 	}
3070 
3071 	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
3072 		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
3073 		return -EINVAL;
3074 	}
3075 
3076 	return pipe_crc_set_source(dev, pipe, source);
3077 }
3078 
3079 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
3080 				     size_t len, loff_t *offp)
3081 {
3082 	struct seq_file *m = file->private_data;
3083 	struct drm_device *dev = m->private;
3084 	char *tmpbuf;
3085 	int ret;
3086 
3087 	if (len == 0)
3088 		return 0;
3089 
3090 	if (len > PAGE_SIZE - 1) {
3091 		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
3092 				 PAGE_SIZE);
3093 		return -E2BIG;
3094 	}
3095 
3096 	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
3097 	if (!tmpbuf)
3098 		return -ENOMEM;
3099 
3100 	if (copy_from_user(tmpbuf, ubuf, len)) {
3101 		ret = -EFAULT;
3102 		goto out;
3103 	}
3104 	tmpbuf[len] = '\0';
3105 
3106 	ret = display_crc_ctl_parse(dev, tmpbuf, len);
3107 
3108 out:
3109 	kfree(tmpbuf);
3110 	if (ret < 0)
3111 		return ret;
3112 
3113 	*offp += len;
3114 	return len;
3115 }
3116 
3117 static const struct file_operations i915_display_crc_ctl_fops = {
3118 	.owner = THIS_MODULE,
3119 	.open = display_crc_ctl_open,
3120 	.read = seq_read,
3121 	.llseek = seq_lseek,
3122 	.release = single_release,
3123 	.write = display_crc_ctl_write
3124 };
3125 
3126 static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
3127 {
3128 	struct drm_device *dev = m->private;
3129 	int num_levels = ilk_wm_max_level(dev) + 1;
3130 	int level;
3131 
3132 	drm_modeset_lock_all(dev);
3133 
3134 	for (level = 0; level < num_levels; level++) {
3135 		unsigned int latency = wm[level];
3136 
3137 		/* WM1+ latency values in 0.5us units */
3138 		if (level > 0)
3139 			latency *= 5;
3140 
3141 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3142 			   level, wm[level],
3143 			   latency / 10, latency % 10);
3144 	}
3145 
3146 	drm_modeset_unlock_all(dev);
3147 }
3148 
3149 static int pri_wm_latency_show(struct seq_file *m, void *data)
3150 {
3151 	struct drm_device *dev = m->private;
3152 
3153 	wm_latency_show(m, to_i915(dev)->wm.pri_latency);
3154 
3155 	return 0;
3156 }
3157 
3158 static int spr_wm_latency_show(struct seq_file *m, void *data)
3159 {
3160 	struct drm_device *dev = m->private;
3161 
3162 	wm_latency_show(m, to_i915(dev)->wm.spr_latency);
3163 
3164 	return 0;
3165 }
3166 
3167 static int cur_wm_latency_show(struct seq_file *m, void *data)
3168 {
3169 	struct drm_device *dev = m->private;
3170 
3171 	wm_latency_show(m, to_i915(dev)->wm.cur_latency);
3172 
3173 	return 0;
3174 }
3175 
3176 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3177 {
3178 	struct drm_device *dev = inode->i_private;
3179 
3180 	if (!HAS_PCH_SPLIT(dev))
3181 		return -ENODEV;
3182 
3183 	return single_open(file, pri_wm_latency_show, dev);
3184 }
3185 
3186 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3187 {
3188 	struct drm_device *dev = inode->i_private;
3189 
3190 	if (!HAS_PCH_SPLIT(dev))
3191 		return -ENODEV;
3192 
3193 	return single_open(file, spr_wm_latency_show, dev);
3194 }
3195 
3196 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3197 {
3198 	struct drm_device *dev = inode->i_private;
3199 
3200 	if (!HAS_PCH_SPLIT(dev))
3201 		return -ENODEV;
3202 
3203 	return single_open(file, cur_wm_latency_show, dev);
3204 }
3205 
3206 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3207 				size_t len, loff_t *offp, uint16_t wm[5])
3208 {
3209 	struct seq_file *m = file->private_data;
3210 	struct drm_device *dev = m->private;
3211 	uint16_t new[5] = { 0 };
3212 	int num_levels = ilk_wm_max_level(dev) + 1;
3213 	int level;
3214 	int ret;
3215 	char tmp[32];
3216 
3217 	if (len >= sizeof(tmp))
3218 		return -EINVAL;
3219 
3220 	if (copy_from_user(tmp, ubuf, len))
3221 		return -EFAULT;
3222 
3223 	tmp[len] = '\0';
3224 
3225 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]);
3226 	if (ret != num_levels)
3227 		return -EINVAL;
3228 
3229 	drm_modeset_lock_all(dev);
3230 
3231 	for (level = 0; level < num_levels; level++)
3232 		wm[level] = new[level];
3233 
3234 	drm_modeset_unlock_all(dev);
3235 
3236 	return len;
3237 }
3238 
3239 
3240 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3241 				    size_t len, loff_t *offp)
3242 {
3243 	struct seq_file *m = file->private_data;
3244 	struct drm_device *dev = m->private;
3245 
3246 	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency);
3247 }
3248 
3249 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3250 				    size_t len, loff_t *offp)
3251 {
3252 	struct seq_file *m = file->private_data;
3253 	struct drm_device *dev = m->private;
3254 
3255 	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency);
3256 }
3257 
3258 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3259 				    size_t len, loff_t *offp)
3260 {
3261 	struct seq_file *m = file->private_data;
3262 	struct drm_device *dev = m->private;
3263 
3264 	return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency);
3265 }
3266 
3267 static const struct file_operations i915_pri_wm_latency_fops = {
3268 	.owner = THIS_MODULE,
3269 	.open = pri_wm_latency_open,
3270 	.read = seq_read,
3271 	.llseek = seq_lseek,
3272 	.release = single_release,
3273 	.write = pri_wm_latency_write
3274 };
3275 
3276 static const struct file_operations i915_spr_wm_latency_fops = {
3277 	.owner = THIS_MODULE,
3278 	.open = spr_wm_latency_open,
3279 	.read = seq_read,
3280 	.llseek = seq_lseek,
3281 	.release = single_release,
3282 	.write = spr_wm_latency_write
3283 };
3284 
3285 static const struct file_operations i915_cur_wm_latency_fops = {
3286 	.owner = THIS_MODULE,
3287 	.open = cur_wm_latency_open,
3288 	.read = seq_read,
3289 	.llseek = seq_lseek,
3290 	.release = single_release,
3291 	.write = cur_wm_latency_write
3292 };
3293 
3294 static int
3295 i915_wedged_get(void *data, u64 *val)
3296 {
3297 	struct drm_device *dev = data;
3298 	struct drm_i915_private *dev_priv = dev->dev_private;
3299 
3300 	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
3301 
3302 	return 0;
3303 }
3304 
3305 static int
3306 i915_wedged_set(void *data, u64 val)
3307 {
3308 	struct drm_device *dev = data;
3309 	struct drm_i915_private *dev_priv = dev->dev_private;
3310 
3311 	intel_runtime_pm_get(dev_priv);
3312 
3313 	i915_handle_error(dev, val,
3314 			  "Manually setting wedged to %llu", val);
3315 
3316 	intel_runtime_pm_put(dev_priv);
3317 
3318 	return 0;
3319 }
3320 
3321 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3322 			i915_wedged_get, i915_wedged_set,
3323 			"%llu\n");
3324 
3325 static int
3326 i915_ring_stop_get(void *data, u64 *val)
3327 {
3328 	struct drm_device *dev = data;
3329 	struct drm_i915_private *dev_priv = dev->dev_private;
3330 
3331 	*val = dev_priv->gpu_error.stop_rings;
3332 
3333 	return 0;
3334 }
3335 
3336 static int
3337 i915_ring_stop_set(void *data, u64 val)
3338 {
3339 	struct drm_device *dev = data;
3340 	struct drm_i915_private *dev_priv = dev->dev_private;
3341 	int ret;
3342 
3343 	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
3344 
3345 	ret = mutex_lock_interruptible(&dev->struct_mutex);
3346 	if (ret)
3347 		return ret;
3348 
3349 	dev_priv->gpu_error.stop_rings = val;
3350 	mutex_unlock(&dev->struct_mutex);
3351 
3352 	return 0;
3353 }
3354 
3355 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
3356 			i915_ring_stop_get, i915_ring_stop_set,
3357 			"0x%08llx\n");
3358 
3359 static int
3360 i915_ring_missed_irq_get(void *data, u64 *val)
3361 {
3362 	struct drm_device *dev = data;
3363 	struct drm_i915_private *dev_priv = dev->dev_private;
3364 
3365 	*val = dev_priv->gpu_error.missed_irq_rings;
3366 	return 0;
3367 }
3368 
3369 static int
3370 i915_ring_missed_irq_set(void *data, u64 val)
3371 {
3372 	struct drm_device *dev = data;
3373 	struct drm_i915_private *dev_priv = dev->dev_private;
3374 	int ret;
3375 
3376 	/* Lock against concurrent debugfs callers */
3377 	ret = mutex_lock_interruptible(&dev->struct_mutex);
3378 	if (ret)
3379 		return ret;
3380 	dev_priv->gpu_error.missed_irq_rings = val;
3381 	mutex_unlock(&dev->struct_mutex);
3382 
3383 	return 0;
3384 }
3385 
3386 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
3387 			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
3388 			"0x%08llx\n");
3389 
3390 static int
3391 i915_ring_test_irq_get(void *data, u64 *val)
3392 {
3393 	struct drm_device *dev = data;
3394 	struct drm_i915_private *dev_priv = dev->dev_private;
3395 
3396 	*val = dev_priv->gpu_error.test_irq_rings;
3397 
3398 	return 0;
3399 }
3400 
3401 static int
3402 i915_ring_test_irq_set(void *data, u64 val)
3403 {
3404 	struct drm_device *dev = data;
3405 	struct drm_i915_private *dev_priv = dev->dev_private;
3406 	int ret;
3407 
3408 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
3409 
3410 	/* Lock against concurrent debugfs callers */
3411 	ret = mutex_lock_interruptible(&dev->struct_mutex);
3412 	if (ret)
3413 		return ret;
3414 
3415 	dev_priv->gpu_error.test_irq_rings = val;
3416 	mutex_unlock(&dev->struct_mutex);
3417 
3418 	return 0;
3419 }
3420 
3421 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
3422 			i915_ring_test_irq_get, i915_ring_test_irq_set,
3423 			"0x%08llx\n");
3424 
3425 #define DROP_UNBOUND 0x1
3426 #define DROP_BOUND 0x2
3427 #define DROP_RETIRE 0x4
3428 #define DROP_ACTIVE 0x8
3429 #define DROP_ALL (DROP_UNBOUND | \
3430 		  DROP_BOUND | \
3431 		  DROP_RETIRE | \
3432 		  DROP_ACTIVE)
3433 static int
3434 i915_drop_caches_get(void *data, u64 *val)
3435 {
3436 	*val = DROP_ALL;
3437 
3438 	return 0;
3439 }
3440 
3441 static int
3442 i915_drop_caches_set(void *data, u64 val)
3443 {
3444 	struct drm_device *dev = data;
3445 	struct drm_i915_private *dev_priv = dev->dev_private;
3446 	struct drm_i915_gem_object *obj, *next;
3447 	struct i915_address_space *vm;
3448 	struct i915_vma *vma, *x;
3449 	int ret;
3450 
3451 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
3452 
3453 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
3454 	 * on ioctls on -EAGAIN. */
3455 	ret = mutex_lock_interruptible(&dev->struct_mutex);
3456 	if (ret)
3457 		return ret;
3458 
3459 	if (val & DROP_ACTIVE) {
3460 		ret = i915_gpu_idle(dev);
3461 		if (ret)
3462 			goto unlock;
3463 	}
3464 
3465 	if (val & (DROP_RETIRE | DROP_ACTIVE))
3466 		i915_gem_retire_requests(dev);
3467 
3468 	if (val & DROP_BOUND) {
3469 		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3470 			list_for_each_entry_safe(vma, x, &vm->inactive_list,
3471 						 mm_list) {
3472 				if (vma->pin_count)
3473 					continue;
3474 
3475 				ret = i915_vma_unbind(vma);
3476 				if (ret)
3477 					goto unlock;
3478 			}
3479 		}
3480 	}
3481 
3482 	if (val & DROP_UNBOUND) {
3483 		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
3484 					 global_list)
3485 			if (obj->pages_pin_count == 0) {
3486 				ret = i915_gem_object_put_pages(obj);
3487 				if (ret)
3488 					goto unlock;
3489 			}
3490 	}
3491 
3492 unlock:
3493 	mutex_unlock(&dev->struct_mutex);
3494 
3495 	return ret;
3496 }
3497 
3498 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3499 			i915_drop_caches_get, i915_drop_caches_set,
3500 			"0x%08llx\n");
3501 
3502 static int
3503 i915_max_freq_get(void *data, u64 *val)
3504 {
3505 	struct drm_device *dev = data;
3506 	struct drm_i915_private *dev_priv = dev->dev_private;
3507 	int ret;
3508 
3509 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3510 		return -ENODEV;
3511 
3512 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3513 
3514 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3515 	if (ret)
3516 		return ret;
3517 
3518 	if (IS_VALLEYVIEW(dev))
3519 		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
3520 	else
3521 		*val = dev_priv->rps.max_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
3522 	mutex_unlock(&dev_priv->rps.hw_lock);
3523 
3524 	return 0;
3525 }
3526 
3527 static int
3528 i915_max_freq_set(void *data, u64 val)
3529 {
3530 	struct drm_device *dev = data;
3531 	struct drm_i915_private *dev_priv = dev->dev_private;
3532 	u32 rp_state_cap, hw_max, hw_min;
3533 	int ret;
3534 
3535 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3536 		return -ENODEV;
3537 
3538 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3539 
3540 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
3541 
3542 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3543 	if (ret)
3544 		return ret;
3545 
3546 	/*
3547 	 * Turbo will still be enabled, but won't go above the set value.
3548 	 */
3549 	if (IS_VALLEYVIEW(dev)) {
3550 		val = vlv_freq_opcode(dev_priv, val);
3551 
3552 		hw_max = valleyview_rps_max_freq(dev_priv);
3553 		hw_min = valleyview_rps_min_freq(dev_priv);
3554 	} else {
3555 		do_div(val, GT_FREQUENCY_MULTIPLIER);
3556 
3557 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3558 		hw_max = dev_priv->rps.max_freq;
3559 		hw_min = (rp_state_cap >> 16) & 0xff;
3560 	}
3561 
3562 	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
3563 		mutex_unlock(&dev_priv->rps.hw_lock);
3564 		return -EINVAL;
3565 	}
3566 
3567 	dev_priv->rps.max_freq_softlimit = val;
3568 
3569 	if (IS_VALLEYVIEW(dev))
3570 		valleyview_set_rps(dev, val);
3571 	else
3572 		gen6_set_rps(dev, val);
3573 
3574 	mutex_unlock(&dev_priv->rps.hw_lock);
3575 
3576 	return 0;
3577 }
3578 
3579 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
3580 			i915_max_freq_get, i915_max_freq_set,
3581 			"%llu\n");
3582 
3583 static int
3584 i915_min_freq_get(void *data, u64 *val)
3585 {
3586 	struct drm_device *dev = data;
3587 	struct drm_i915_private *dev_priv = dev->dev_private;
3588 	int ret;
3589 
3590 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3591 		return -ENODEV;
3592 
3593 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3594 
3595 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3596 	if (ret)
3597 		return ret;
3598 
3599 	if (IS_VALLEYVIEW(dev))
3600 		*val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
3601 	else
3602 		*val = dev_priv->rps.min_freq_softlimit * GT_FREQUENCY_MULTIPLIER;
3603 	mutex_unlock(&dev_priv->rps.hw_lock);
3604 
3605 	return 0;
3606 }
3607 
3608 static int
3609 i915_min_freq_set(void *data, u64 val)
3610 {
3611 	struct drm_device *dev = data;
3612 	struct drm_i915_private *dev_priv = dev->dev_private;
3613 	u32 rp_state_cap, hw_max, hw_min;
3614 	int ret;
3615 
3616 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3617 		return -ENODEV;
3618 
3619 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
3620 
3621 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
3622 
3623 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
3624 	if (ret)
3625 		return ret;
3626 
3627 	/*
3628 	 * Turbo will still be enabled, but won't go below the set value.
3629 	 */
3630 	if (IS_VALLEYVIEW(dev)) {
3631 		val = vlv_freq_opcode(dev_priv, val);
3632 
3633 		hw_max = valleyview_rps_max_freq(dev_priv);
3634 		hw_min = valleyview_rps_min_freq(dev_priv);
3635 	} else {
3636 		do_div(val, GT_FREQUENCY_MULTIPLIER);
3637 
3638 		rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3639 		hw_max = dev_priv->rps.max_freq;
3640 		hw_min = (rp_state_cap >> 16) & 0xff;
3641 	}
3642 
3643 	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
3644 		mutex_unlock(&dev_priv->rps.hw_lock);
3645 		return -EINVAL;
3646 	}
3647 
3648 	dev_priv->rps.min_freq_softlimit = val;
3649 
3650 	if (IS_VALLEYVIEW(dev))
3651 		valleyview_set_rps(dev, val);
3652 	else
3653 		gen6_set_rps(dev, val);
3654 
3655 	mutex_unlock(&dev_priv->rps.hw_lock);
3656 
3657 	return 0;
3658 }
3659 
3660 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
3661 			i915_min_freq_get, i915_min_freq_set,
3662 			"%llu\n");
3663 
3664 static int
3665 i915_cache_sharing_get(void *data, u64 *val)
3666 {
3667 	struct drm_device *dev = data;
3668 	struct drm_i915_private *dev_priv = dev->dev_private;
3669 	u32 snpcr;
3670 	int ret;
3671 
3672 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3673 		return -ENODEV;
3674 
3675 	ret = mutex_lock_interruptible(&dev->struct_mutex);
3676 	if (ret)
3677 		return ret;
3678 	intel_runtime_pm_get(dev_priv);
3679 
3680 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3681 
3682 	intel_runtime_pm_put(dev_priv);
3683 	mutex_unlock(&dev_priv->dev->struct_mutex);
3684 
3685 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3686 
3687 	return 0;
3688 }
3689 
3690 static int
3691 i915_cache_sharing_set(void *data, u64 val)
3692 {
3693 	struct drm_device *dev = data;
3694 	struct drm_i915_private *dev_priv = dev->dev_private;
3695 	u32 snpcr;
3696 
3697 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
3698 		return -ENODEV;
3699 
3700 	if (val > 3)
3701 		return -EINVAL;
3702 
3703 	intel_runtime_pm_get(dev_priv);
3704 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
3705 
3706 	/* Update the cache sharing policy here as well */
3707 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3708 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
3709 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
3710 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3711 
3712 	intel_runtime_pm_put(dev_priv);
3713 	return 0;
3714 }
3715 
3716 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3717 			i915_cache_sharing_get, i915_cache_sharing_set,
3718 			"%llu\n");
3719 
3720 static int i915_forcewake_open(struct inode *inode, struct file *file)
3721 {
3722 	struct drm_device *dev = inode->i_private;
3723 	struct drm_i915_private *dev_priv = dev->dev_private;
3724 
3725 	if (INTEL_INFO(dev)->gen < 6)
3726 		return 0;
3727 
3728 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3729 
3730 	return 0;
3731 }
3732 
3733 static int i915_forcewake_release(struct inode *inode, struct file *file)
3734 {
3735 	struct drm_device *dev = inode->i_private;
3736 	struct drm_i915_private *dev_priv = dev->dev_private;
3737 
3738 	if (INTEL_INFO(dev)->gen < 6)
3739 		return 0;
3740 
3741 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3742 
3743 	return 0;
3744 }
3745 
3746 static const struct file_operations i915_forcewake_fops = {
3747 	.owner = THIS_MODULE,
3748 	.open = i915_forcewake_open,
3749 	.release = i915_forcewake_release,
3750 };
3751 
3752 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
3753 {
3754 	struct drm_device *dev = minor->dev;
3755 	struct dentry *ent;
3756 
3757 	ent = debugfs_create_file("i915_forcewake_user",
3758 				  S_IRUSR,
3759 				  root, dev,
3760 				  &i915_forcewake_fops);
3761 	if (!ent)
3762 		return -ENOMEM;
3763 
3764 	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
3765 }
3766 
3767 static int i915_debugfs_create(struct dentry *root,
3768 			       struct drm_minor *minor,
3769 			       const char *name,
3770 			       const struct file_operations *fops)
3771 {
3772 	struct drm_device *dev = minor->dev;
3773 	struct dentry *ent;
3774 
3775 	ent = debugfs_create_file(name,
3776 				  S_IRUGO | S_IWUSR,
3777 				  root, dev,
3778 				  fops);
3779 	if (!ent)
3780 		return -ENOMEM;
3781 
3782 	return drm_add_fake_info_node(minor, ent, fops);
3783 }
3784 
3785 static const struct drm_info_list i915_debugfs_list[] = {
3786 	{"i915_capabilities", i915_capabilities, 0},
3787 	{"i915_gem_objects", i915_gem_object_info, 0},
3788 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
3789 	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
3790 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
3791 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
3792 	{"i915_gem_stolen", i915_gem_stolen_list_info },
3793 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
3794 	{"i915_gem_request", i915_gem_request_info, 0},
3795 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
3796 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
3797 	{"i915_gem_interrupt", i915_interrupt_info, 0},
3798 	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
3799 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
3800 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
3801 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
3802 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
3803 	{"i915_frequency_info", i915_frequency_info, 0},
3804 	{"i915_delayfreq_table", i915_delayfreq_table, 0},
3805 	{"i915_inttoext_table", i915_inttoext_table, 0},
3806 	{"i915_drpc_info", i915_drpc_info, 0},
3807 	{"i915_emon_status", i915_emon_status, 0},
3808 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
3809 	{"i915_gfxec", i915_gfxec, 0},
3810 	{"i915_fbc_status", i915_fbc_status, 0},
3811 	{"i915_ips_status", i915_ips_status, 0},
3812 	{"i915_sr_status", i915_sr_status, 0},
3813 	{"i915_opregion", i915_opregion, 0},
3814 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
3815 	{"i915_context_status", i915_context_status, 0},
3816 	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
3817 	{"i915_swizzle_info", i915_swizzle_info, 0},
3818 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
3819 	{"i915_llc", i915_llc, 0},
3820 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
3821 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
3822 	{"i915_energy_uJ", i915_energy_uJ, 0},
3823 	{"i915_pc8_status", i915_pc8_status, 0},
3824 	{"i915_power_domain_info", i915_power_domain_info, 0},
3825 	{"i915_display_info", i915_display_info, 0},
3826 };
3827 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
3828 
3829 static const struct i915_debugfs_files {
3830 	const char *name;
3831 	const struct file_operations *fops;
3832 } i915_debugfs_files[] = {
3833 	{"i915_wedged", &i915_wedged_fops},
3834 	{"i915_max_freq", &i915_max_freq_fops},
3835 	{"i915_min_freq", &i915_min_freq_fops},
3836 	{"i915_cache_sharing", &i915_cache_sharing_fops},
3837 	{"i915_ring_stop", &i915_ring_stop_fops},
3838 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
3839 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
3840 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
3841 	{"i915_error_state", &i915_error_state_fops},
3842 	{"i915_next_seqno", &i915_next_seqno_fops},
3843 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
3844 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
3845 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
3846 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
3847 };
3848 
3849 void intel_display_crc_init(struct drm_device *dev)
3850 {
3851 	struct drm_i915_private *dev_priv = dev->dev_private;
3852 	enum pipe pipe;
3853 
3854 	for_each_pipe(pipe) {
3855 		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3856 
3857 		pipe_crc->opened = false;
3858 		spin_lock_init(&pipe_crc->lock);
3859 		init_waitqueue_head(&pipe_crc->wq);
3860 	}
3861 }
3862 
3863 int i915_debugfs_init(struct drm_minor *minor)
3864 {
3865 	int ret, i;
3866 
3867 	ret = i915_forcewake_create(minor->debugfs_root, minor);
3868 	if (ret)
3869 		return ret;
3870 
3871 	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
3872 		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
3873 		if (ret)
3874 			return ret;
3875 	}
3876 
3877 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
3878 		ret = i915_debugfs_create(minor->debugfs_root, minor,
3879 					  i915_debugfs_files[i].name,
3880 					  i915_debugfs_files[i].fops);
3881 		if (ret)
3882 			return ret;
3883 	}
3884 
3885 	return drm_debugfs_create_files(i915_debugfs_list,
3886 					I915_DEBUGFS_ENTRIES,
3887 					minor->debugfs_root, minor);
3888 }
3889 
3890 void i915_debugfs_cleanup(struct drm_minor *minor)
3891 {
3892 	int i;
3893 
3894 	drm_debugfs_remove_files(i915_debugfs_list,
3895 				 I915_DEBUGFS_ENTRIES, minor);
3896 
3897 	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
3898 				 1, minor);
3899 
3900 	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
3901 		struct drm_info_list *info_list =
3902 			(struct drm_info_list *)&i915_pipe_crc_data[i];
3903 
3904 		drm_debugfs_remove_files(info_list, 1, minor);
3905 	}
3906 
3907 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
3908 		struct drm_info_list *info_list =
3909 			(struct drm_info_list *) i915_debugfs_files[i].fops;
3910 
3911 		drm_debugfs_remove_files(info_list, 1, minor);
3912 	}
3913 }
3914