/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <generated/utsrelease.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DRM_I915_RING_DEBUG 1

#if defined(CONFIG_DEBUG_FS)

enum {
	ACTIVE_LIST,
	INACTIVE_LIST,
	PINNED_LIST,
};

static const char *yesno(int v)
{
	return v ? "yes" : "no";
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	const struct intel_device_info *info = INTEL_INFO(dev);

	seq_printf(m, "gen: %d\n", info->gen);
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define DEV_INFO_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
#define DEV_INFO_SEP ;
	DEV_INFO_FLAGS;
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP

	return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
	if (obj->user_pin_count > 0)
		return "P";
	else if (obj->pin_count > 0)
		return "p";
	else
		return " ";
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (obj->tiling_mode) {
	default:
	case I915_TILING_NONE: return " ";
	case I915_TILING_X: return "X";
	case I915_TILING_Y: return "Y";
	}
}

static const char *cache_level_str(int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return " snooped (LLC)";
	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
	default: return "";
	}
}

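/*
 * Print a one-line summary of a GEM object: pin/tiling flags, size,
 * read/write domains, last read/write/fence seqnos, cache level, and
 * optional details (flink name, fence register, GTT/stolen placement,
 * mappability, last ring used).
 */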
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
		   obj->last_read_seqno,
		   obj->last_write_seqno,
		   obj->last_fenced_seqno,
		   cache_level_str(obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	if (obj->pin_count)
		seq_printf(m, " (pinned x %d)", obj->pin_count);
	if (obj->fence_reg != I915_FENCE_REG_NONE)
		seq_printf(m, " (fence: %d)", obj->fence_reg);
	if (obj->gtt_space != NULL)
		seq_printf(m, " (gtt offset: %08x, size: %08x)",
			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
	if (obj->pin_mappable || obj->fault_mappable) {
		char s[3], *t = s;
		if (obj->pin_mappable)
			*t++ = 'p';
		if (obj->fault_mappable)
			*t++ = 'f';
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->ring != NULL)
		seq_printf(m, " (%s)", obj->ring->name);
}

static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct list_head *head;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
	default:
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, head, mm_list) {
		seq_printf(m, "   ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);
	return 0;
}

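/*
 * Accumulate count/size statistics over a list of GEM objects; objects
 * that are map_and_fenceable are additionally tallied in the mappable
 * counters. Relies on obj, count, size, mappable_count and mappable_size
 * being in scope at the expansion site.
 */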
#define count_objects(list, member) do { \
	list_for_each_entry(obj, list, member) { \
		size += obj->gtt_space->size; \
		++count; \
		if (obj->map_and_fenceable) { \
			mappable_size += obj->gtt_space->size; \
			++mappable_count; \
		} \
	} \
} while (0)

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 count, mappable_count, purgeable_count;
	size_t size, mappable_size, purgeable_size;
	struct drm_i915_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %zu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.bound_list, gtt_list);
	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.active_list, mm_list);
	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = mappable_size = mappable_count = 0;
	count_objects(&dev_priv->mm.inactive_list, mm_list);
	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
		   count, mappable_count, size, mappable_size);

	size = count = purgeable_size = purgeable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) {
		size += obj->base.size, ++count;
		if (obj->madv == I915_MADV_DONTNEED)
			purgeable_size += obj->base.size, ++purgeable_count;
	}
	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);

	size = count = mappable_size = mappable_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		if (obj->fault_mappable) {
			size += obj->gtt_space->size;
			++count;
		}
		if (obj->pin_mappable) {
			mappable_size += obj->gtt_space->size;
			++mappable_count;
		}
		if (obj->madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}
	}
	seq_printf(m, "%u purgeable objects, %zu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
		   mappable_count, mappable_size);
	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
		   count, size);

	seq_printf(m, "%zu [%lu] gtt total\n",
		   dev_priv->gtt.total,
		   dev_priv->gtt.mappable_end - dev_priv->gtt.start);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	uintptr_t list = (uintptr_t) node->info_ent->data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	size_t total_obj_size, total_gtt_size;
	int count, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	total_obj_size = total_gtt_size = count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		if (list == PINNED_LIST && obj->pin_count == 0)
			continue;

		seq_printf(m, "   ");
		describe_obj(m, obj);
		seq_printf(m, "\n");
		total_obj_size += obj->base.size;
		total_gtt_size += obj->gtt_space->size;
		count++;
	}

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	return 0;
}

static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	unsigned long flags;
	struct intel_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		const char pipe = pipe_name(crtc->pipe);
		const char plane = plane_name(crtc->plane);
		struct intel_unpin_work *work;

		spin_lock_irqsave(&dev->event_lock, flags);
		work = crtc->unpin_work;
		if (work == NULL) {
			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
				   pipe, plane);
		} else {
			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
					   pipe, plane);
			} else {
				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
					   pipe, plane);
			}
			if (work->enable_stall_check)
				seq_printf(m, "Stall check enabled, ");
			else
				seq_printf(m, "Stall check waiting for page flip ioctl, ");
			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));

			if (work->old_fb_obj) {
				struct drm_i915_gem_object *obj = work->old_fb_obj;
				if (obj)
					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
			if (work->pending_flip_obj) {
				struct drm_i915_gem_object *obj = work->pending_flip_obj;
				if (obj)
					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
			}
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	return 0;
}

static int i915_gem_request_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *gem_request;
	int ret, count, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	count = 0;
	for_each_ring(ring, dev_priv, i) {
		if (list_empty(&ring->request_list))
			continue;

		seq_printf(m, "%s requests:\n", ring->name);
		list_for_each_entry(gem_request,
				    &ring->request_list,
				    list) {
			seq_printf(m, "    %d @ %d\n",
				   gem_request->seqno,
				   (int) (jiffies - gem_request->emitted_jiffies));
		}
		count++;
	}
	mutex_unlock(&dev->struct_mutex);

	if (count == 0)
		seq_printf(m, "No requests\n");

	return 0;
}

static void i915_ring_seqno_info(struct seq_file *m,
				 struct intel_ring_buffer *ring)
{
	if (ring->get_seqno) {
		seq_printf(m, "Current sequence (%s): %u\n",
			   ring->name, ring->get_seqno(ring, false));
	}
}

static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_ring(ring, dev_priv, i)
		i915_ring_seqno_info(m, ring);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i, pipe;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (IS_VALLEYVIEW(dev)) {
		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));

	} else if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(IMR));
		for_each_pipe(pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}
	seq_printf(m, "Interrupts received: %d\n",
		   atomic_read(&dev_priv->irq_received));
	for_each_ring(ring, dev_priv, i) {
		if (IS_GEN6(dev) || IS_GEN7(dev)) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   ring->name, I915_READ_IMR(ring));
		}
		i915_ring_seqno_info(m, ring);
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, dev_priv->fence_regs[i].pin_count);
		if (obj == NULL)
			seq_printf(m, "unused");
		else
			describe_obj(m, obj);
		seq_printf(m, "\n");
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

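/*
 * Dump the selected ring's hardware status page, four dwords per line,
 * prefixed with the byte offset. Note the loop bound covers only the
 * first 256 dwords (one quarter of the 4KiB page).
 */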
static int i915_hws_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	const u32 *hws;
	int i;

	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
	hws = ring->status_page.page_addr;
	if (hws == NULL)
		return 0;

	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   i * 4,
			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
	}
	return 0;
}

static const char *ring_str(int ring)
{
	switch (ring) {
	case RCS: return "render";
	case VCS: return "bsd";
	case BCS: return "blt";
	default: return "";
	}
}

static const char *pin_flag(int pinned)
{
	if (pinned > 0)
		return " P";
	else if (pinned < 0)
		return " p";
	else
		return "";
}

static const char *tiling_flag(int tiling)
{
	switch (tiling) {
	default:
	case I915_TILING_NONE: return "";
	case I915_TILING_X: return " X";
	case I915_TILING_Y: return " Y";
	}
}

static const char *dirty_flag(int dirty)
{
	return dirty ? " dirty" : "";
}

static const char *purgeable_flag(int purgeable)
{
	return purgeable ? " purgeable" : "";
}

static void print_error_buffers(struct seq_file *m,
				const char *name,
				struct drm_i915_error_buffer *err,
				int count)
{
	seq_printf(m, "%s [%d]:\n", name, count);

	while (count--) {
		seq_printf(m, "  %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
			   err->gtt_offset,
			   err->size,
			   err->read_domains,
			   err->write_domain,
			   err->rseqno, err->wseqno,
			   pin_flag(err->pinned),
			   tiling_flag(err->tiling),
			   dirty_flag(err->dirty),
			   purgeable_flag(err->purgeable),
			   err->ring != -1 ? " " : "",
			   ring_str(err->ring),
			   cache_level_str(err->cache_level));

		if (err->name)
			seq_printf(m, " (name: %d)", err->name);
		if (err->fence_reg != I915_FENCE_REG_NONE)
			seq_printf(m, " (fence: %d)", err->fence_reg);

		seq_printf(m, "\n");
		err++;
	}
}

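/*
 * Print the per-ring register snapshot (head/tail/ctl, instruction and
 * fault state, semaphore mailboxes, last seqno, CPU-side ring pointers)
 * captured in a GPU error state dump.
 */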
static void i915_ring_error_state(struct seq_file *m,
				  struct drm_device *dev,
				  struct drm_i915_error_state *error,
				  unsigned ring)
{
	BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
	seq_printf(m, "%s command stream:\n", ring_str(ring));
	seq_printf(m, "  HEAD: 0x%08x\n", error->head[ring]);
	seq_printf(m, "  TAIL: 0x%08x\n", error->tail[ring]);
	seq_printf(m, "  CTL: 0x%08x\n", error->ctl[ring]);
	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd[ring]);
	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir[ring]);
	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr[ring]);
	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone[ring]);
	if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
		seq_printf(m, "  BBADDR: 0x%08llx\n", error->bbaddr);

	if (INTEL_INFO(dev)->gen >= 4)
		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps[ring]);
	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm[ring]);
	seq_printf(m, "  FADDR: 0x%08x\n", error->faddr[ring]);
	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "  RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
		seq_printf(m, "  FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
		seq_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][0],
			   error->semaphore_seqno[ring][0]);
		seq_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
			   error->semaphore_mboxes[ring][1],
			   error->semaphore_seqno[ring][1]);
	}
	seq_printf(m, "  seqno: 0x%08x\n", error->seqno[ring]);
	seq_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
	seq_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
	seq_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
}

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

static int i915_error_state(struct seq_file *m, void *unused)
{
	struct i915_error_state_file_priv *error_priv = m->private;
	struct drm_device *dev = error_priv->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error = error_priv->error;
	struct intel_ring_buffer *ring;
	int i, j, page, offset, elt;

	if (!error) {
		seq_printf(m, "no error state collected\n");
		return 0;
	}

	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
		   error->time.tv_usec);
	seq_printf(m, "Kernel: " UTS_RELEASE "\n");
	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
	seq_printf(m, "EIR: 0x%08x\n", error->eir);
	seq_printf(m, "IER: 0x%08x\n", error->ier);
	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
	seq_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
	seq_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
	seq_printf(m, "CCID: 0x%08x\n", error->ccid);

	for (i = 0; i < dev_priv->num_fence_regs; i++)
		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);

	for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
		seq_printf(m, "  INSTDONE_%d: 0x%08x\n", i, error->extra_instdone[i]);

	if (INTEL_INFO(dev)->gen >= 6) {
		seq_printf(m, "ERROR: 0x%08x\n", error->error);
		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
	}

	if (INTEL_INFO(dev)->gen == 7)
		seq_printf(m, "ERR_INT: 0x%08x\n", error->err_int);

	for_each_ring(ring, dev_priv, i)
		i915_ring_error_state(m, dev, error, i);

	if (error->active_bo)
		print_error_buffers(m, "Active",
				    error->active_bo,
				    error->active_bo_count);

	if (error->pinned_bo)
		print_error_buffers(m, "Pinned",
				    error->pinned_bo,
				    error->pinned_bo_count);

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		struct drm_i915_error_object *obj;

		if ((obj = error->ring[i].batchbuffer)) {
			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x :  %08x\n", offset, obj->pages[page][elt]);
					offset += 4;
				}
			}
		}

		if (error->ring[i].num_requests) {
			seq_printf(m, "%s --- %d requests\n",
				   dev_priv->ring[i].name,
				   error->ring[i].num_requests);
			for (j = 0; j < error->ring[i].num_requests; j++) {
				seq_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
					   error->ring[i].requests[j].seqno,
					   error->ring[i].requests[j].jiffies,
					   error->ring[i].requests[j].tail);
			}
		}

		if ((obj = error->ring[i].ringbuffer)) {
			seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
				   dev_priv->ring[i].name,
				   obj->gtt_offset);
			offset = 0;
			for (page = 0; page < obj->page_count; page++) {
				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
					seq_printf(m, "%08x :  %08x\n",
						   offset,
						   obj->pages[page][elt]);
					offset += 4;
				}
			}
		}
	}

	if (error->overlay)
		intel_overlay_print_error_state(m, error->overlay);

	if (error->display)
		intel_display_print_error_state(m, dev, error->display);

	return 0;
}

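/*
 * Any write to the i915_error_state file discards the captured error
 * state, regardless of the data written. The DRM debugfs root is
 * typically /sys/kernel/debug/dri/<minor> (path may vary by system),
 * so for example:
 *
 *	echo > /sys/kernel/debug/dri/0/i915_error_state
 *
 * clears a recorded GPU hang dump.
 */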
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct i915_error_state_file_priv *error_priv = m->private;
	struct drm_device *dev = error_priv->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct drm_device *dev = inode->i_private;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct i915_error_state_file_priv *error_priv;
	unsigned long flags;

	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
	if (!error_priv)
		return -ENOMEM;

	error_priv->dev = dev;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error_priv->error = dev_priv->gpu_error.first_error;
	if (error_priv->error)
		kref_get(&error_priv->error->ref);
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	return single_open(file, i915_error_state, error_priv);
}

static int i915_error_state_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct i915_error_state_file_priv *error_priv = m->private;

	if (error_priv->error)
		kref_put(&error_priv->error->ref, i915_error_state_free);
	kfree(error_priv);

	return single_release(inode, file);
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = seq_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = i915_error_state_release,
};

static ssize_t
i915_next_seqno_read(struct file *filp,
		     char __user *ubuf,
		     size_t max,
		     loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	len = snprintf(buf, sizeof(buf),
		       "next_seqno :  0x%x\n",
		       dev_priv->next_seqno);

	mutex_unlock(&dev->struct_mutex);

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

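/*
 * Writing a value here forces the driver's next seqno via
 * i915_gem_set_seqno(). This looks like a test hook (e.g. for
 * exercising seqno wraparound without issuing billions of requests);
 * that purpose is inferred from its use, not stated in this file.
 */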
static ssize_t
i915_next_seqno_write(struct file *filp,
		      const char __user *ubuf,
		      size_t cnt,
		      loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	char buf[20];
	u32 val = 1;
	int ret;

	if (cnt > 0) {
		if (cnt > sizeof(buf) - 1)
			return -EINVAL;

		if (copy_from_user(buf, ubuf, cnt))
			return -EFAULT;
		buf[cnt] = 0;

		ret = kstrtouint(buf, 0, &val);
		if (ret < 0)
			return ret;
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_set_seqno(dev, val);

	mutex_unlock(&dev->struct_mutex);

	return ret ?: cnt;
}

static const struct file_operations i915_next_seqno_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = i915_next_seqno_read,
	.write = i915_next_seqno_write,
	.llseek = default_llseek,
};

static int i915_rstdby_delays(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 crstanddelay;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	crstanddelay = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n",
		   (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));

	return 0;
}

static int i915_cur_delayinfo(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	if (IS_GEN5(dev)) {
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
		u32 rpstat, cagf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		int max_freq;

		/* RPSTAT1 is in the GT power well */
		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;

		gen6_gt_force_wake_get(dev_priv);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
		rpcurup = I915_READ(GEN6_RP_CUR_UP);
		rpprevup = I915_READ(GEN6_RP_PREV_UP);
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
		if (IS_HASWELL(dev))
			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		cagf *= GT_FREQUENCY_MULTIPLIER;

		gen6_gt_force_wake_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & 0xff00) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
			   GEN6_CURICONT_MASK);
		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
			   GEN6_CURIAVG_MASK);
		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
			   GEN6_CURBSYTAVG_MASK);
		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
			   GEN6_CURBSYTAVG_MASK);

		max_freq = (rp_state_cap & 0xff0000) >> 16;
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = (rp_state_cap & 0xff00) >> 8;
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);

		max_freq = rp_state_cap & 0xff;
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   max_freq * GT_FREQUENCY_MULTIPLIER);
	} else {
		seq_printf(m, "no P-state info available\n");
	}

	return 0;
}

static int i915_delayfreq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 delayfreq;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 0; i < 16; i++) {
		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static inline int MAP_TO_MV(int map)
{
	return 1250 - (map * 25);
}

static int i915_inttoext_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 inttoext;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for (i = 1; i <= 32; i++) {
		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
		   "yes" : "no");
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
	seq_printf(m, "SW control enabled: %s\n",
		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
	seq_printf(m, "Gated voltage change: %s\n",
		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
	seq_printf(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_printf(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_printf(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_printf(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_printf(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_printf(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_printf(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_printf(m, "unknown\n");
		break;
	}

	return 0;
}

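/*
 * Report RC6 control bits, the current RC state and residency counters
 * on GEN6+. The RC state registers read back wrong values while anybody
 * holds a forcewake reference, so the function first checks the
 * forcewake count and flags the output as inaccurate if it is non-zero.
 */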
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
	unsigned forcewake_count;
	int count = 0, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	if (forcewake_count) {
		seq_printf(m, "RC information inaccurate because somebody "
			      "holds a forcewake reference \n");
	} else {
		/* NB: we cannot use forcewake, else we read the wrong values */
		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
			udelay(10);
		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
	}

	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);

	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	mutex_unlock(&dev_priv->rps.hw_lock);

	seq_printf(m, "Video Turbo Mode: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
			  GEN6_RP_MEDIA_SW_MODE));
	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_printf(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_printf(m, "Core Power Down\n");
		else
			seq_printf(m, "on\n");
		break;
	case GEN6_RC3:
		seq_printf(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_printf(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_printf(m, "RC7\n");
		break;
	default:
		seq_printf(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));

	/* Not exactly sure what this is */
	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
	seq_printf(m, "RC6 residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6));
	seq_printf(m, "RC6+ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6p));
	seq_printf(m, "RC6++ residency since boot: %u\n",
		   I915_READ(GEN6_GT_GFX_RC6pp));

	seq_printf(m, "RC6   voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
	seq_printf(m, "RC6+  voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
	seq_printf(m, "RC6++ voltage: %dmV\n",
		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	return 0;
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;

	if (IS_GEN6(dev) || IS_GEN7(dev))
		return gen6_drpc_info(m);
	else
		return ironlake_drpc_info(m);
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!I915_HAS_FBC(dev)) {
		seq_printf(m, "FBC unsupported on this chipset\n");
		return 0;
	}

	if (intel_fbc_enabled(dev)) {
		seq_printf(m, "FBC enabled\n");
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
		case FBC_NO_OUTPUT:
			seq_printf(m, "no outputs");
			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
		case FBC_UNSUPPORTED_MODE:
			seq_printf(m, "mode not supported");
			break;
		case FBC_MODE_TOO_LARGE:
			seq_printf(m, "mode too large");
			break;
		case FBC_BAD_PLANE:
			seq_printf(m, "FBC unsupported on plane");
			break;
		case FBC_NOT_TILED:
			seq_printf(m, "scanout buffer not tiled");
			break;
		case FBC_MULTIPLE_PIPES:
			seq_printf(m, "multiple pipes are enabled");
			break;
		case FBC_MODULE_PARAM:
			seq_printf(m, "disabled per module param (default off)");
			break;
		default:
			seq_printf(m, "unknown reason");
		}
		seq_printf(m, "\n");
	}
	return 0;
}

static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

	if (HAS_PCH_SPLIT(dev))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

	seq_printf(m, "self-refresh: %s\n",
		   sr_enabled ? "enabled" : "disabled");

	return 0;
}

static int i915_emon_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
	int ret;

	if (!IS_GEN5(dev))
		return -ENODEV;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
	seq_printf(m, "GFX power: %ld\n", gfx);
	seq_printf(m, "Total power: %ld\n", chipset + gfx);

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	int gpu_freq, ia_freq;

	if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
		seq_printf(m, "unsupported on this chipset\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
	if (ret)
		return ret;

	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");

	for (gpu_freq = dev_priv->rps.min_delay;
	     gpu_freq <= dev_priv->rps.max_delay;
	     gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\n",
			   gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);

	return 0;
}

static int i915_gfxec(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
	int ret;

	if (data == NULL)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	if (opregion->header) {
		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
		seq_write(m, data, OPREGION_SIZE);
	}

	mutex_unlock(&dev->struct_mutex);

out:
	kfree(data);
	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_fbdev *ifbdev;
	struct intel_framebuffer *fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	ifbdev = dev_priv->fbdev;
	fb = to_intel_framebuffer(ifbdev->helper.fb);

	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
		   fb->base.width,
		   fb->base.height,
		   fb->base.depth,
		   fb->base.bits_per_pixel,
		   atomic_read(&fb->base.refcount.refcount));
	describe_obj(m, fb->obj);
	seq_printf(m, "\n");
	mutex_unlock(&dev->mode_config.mutex);

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
		if (&fb->base == ifbdev->helper.fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.depth,
			   fb->base.bits_per_pixel,
			   atomic_read(&fb->base.refcount.refcount));
		describe_obj(m, fb->obj);
		seq_printf(m, "\n");
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int ret, i;

	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
	if (ret)
		return ret;

	if (dev_priv->ips.pwrctx) {
		seq_printf(m, "power context ");
		describe_obj(m, dev_priv->ips.pwrctx);
		seq_printf(m, "\n");
	}

	if (dev_priv->ips.renderctx) {
		seq_printf(m, "render context ");
		describe_obj(m, dev_priv->ips.renderctx);
		seq_printf(m, "\n");
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->default_context) {
			seq_printf(m, "HW default context %s ring ", ring->name);
			describe_obj(m, ring->default_context->obj);
			seq_printf(m, "\n");
		}
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}

static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned forcewake_count;

	spin_lock_irq(&dev_priv->gt_lock);
	forcewake_count = dev_priv->forcewake_count;
	spin_unlock_irq(&dev_priv->gt_lock);

	seq_printf(m, "forcewake count = %u\n", forcewake_count);

	return 0;
}

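/* Decode a bit-6 swizzle mode into a human-readable string. */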
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN3(dev) || IS_GEN4(dev)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		seq_printf(m, "ARB_MODE = 0x%08x\n",
			   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i, ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;
	if (INTEL_INFO(dev)->gen == 6)
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_ring(ring, dev_priv, i) {
		seq_printf(m, "%s\n", ring->name);
		if (INTEL_INFO(dev)->gen == 7)
			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_printf(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
	}
	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_dpio_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!IS_VALLEYVIEW(dev)) {
		seq_printf(m, "unsupported\n");
		return 0;
	}

	ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
	if (ret)
		return ret;

	seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));

	seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_DIV_A));
	seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_DIV_B));

	seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_REFSFR_A));
	seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_REFSFR_B));

	seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
	seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_CORE_CLK_B));

	seq_printf(m, "DPIO_LFP_COEFF_A: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_A));
	seq_printf(m, "DPIO_LFP_COEFF_B: 0x%08x\n",
		   intel_dpio_read(dev_priv, _DPIO_LFP_COEFF_B));

	seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
		   intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));

	mutex_unlock(&dev_priv->dpio_lock);

	return 0;
}

static ssize_t
i915_wedged_read(struct file *filp,
		 char __user *ubuf,
		 size_t max,
		 loff_t *ppos)
{
	struct drm_device *dev = filp->private_data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       "wedged :  %d\n",
		       atomic_read(&dev_priv->gpu_error.reset_counter));

	if (len > sizeof(buf))
		len = sizeof(buf);

	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

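/*
 * Writing here injects a GPU error via i915_handle_error() and so kicks
 * off hang handling by hand. Assuming the usual DRM debugfs layout
 * (path may differ per system), for example:
 *
 *	echo 1 > /sys/kernel/debug/dri/0/i915_wedged
 */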
1704 static ssize_t
1705 i915_wedged_write(struct file *filp,
1706 		  const char __user *ubuf,
1707 		  size_t cnt,
1708 		  loff_t *ppos)
1709 {
1710 	struct drm_device *dev = filp->private_data;
1711 	char buf[20];
1712 	int val = 1;
1713 
1714 	if (cnt > 0) {
1715 		if (cnt > sizeof(buf) - 1)
1716 			return -EINVAL;
1717 
1718 		if (copy_from_user(buf, ubuf, cnt))
1719 			return -EFAULT;
1720 		buf[cnt] = 0;
1721 
1722 		val = simple_strtoul(buf, NULL, 0);
1723 	}
1724 
1725 	DRM_INFO("Manually setting wedged to %d\n", val);
1726 	i915_handle_error(dev, val);
1727 
1728 	return cnt;
1729 }
1730 
1731 static const struct file_operations i915_wedged_fops = {
1732 	.owner = THIS_MODULE,
1733 	.open = simple_open,
1734 	.read = i915_wedged_read,
1735 	.write = i915_wedged_write,
1736 	.llseek = default_llseek,
1737 };
1738 
1739 static ssize_t
1740 i915_ring_stop_read(struct file *filp,
1741 		    char __user *ubuf,
1742 		    size_t max,
1743 		    loff_t *ppos)
1744 {
1745 	struct drm_device *dev = filp->private_data;
1746 	drm_i915_private_t *dev_priv = dev->dev_private;
1747 	char buf[20];
1748 	int len;
1749 
1750 	len = snprintf(buf, sizeof(buf),
1751 		       "0x%08x\n", dev_priv->gpu_error.stop_rings);
1752 
1753 	if (len > sizeof(buf))
1754 		len = sizeof(buf);
1755 
1756 	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1757 }
1758 
1759 static ssize_t
1760 i915_ring_stop_write(struct file *filp,
1761 		     const char __user *ubuf,
1762 		     size_t cnt,
1763 		     loff_t *ppos)
1764 {
1765 	struct drm_device *dev = filp->private_data;
1766 	struct drm_i915_private *dev_priv = dev->dev_private;
1767 	char buf[20];
1768 	int val = 0, ret;
1769 
1770 	if (cnt > 0) {
1771 		if (cnt > sizeof(buf) - 1)
1772 			return -EINVAL;
1773 
1774 		if (copy_from_user(buf, ubuf, cnt))
1775 			return -EFAULT;
1776 		buf[cnt] = 0;
1777 
1778 		val = simple_strtoul(buf, NULL, 0);
1779 	}
1780 
1781 	DRM_DEBUG_DRIVER("Stopping rings 0x%08x\n", val);
1782 
1783 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1784 	if (ret)
1785 		return ret;
1786 
1787 	dev_priv->gpu_error.stop_rings = val;
1788 	mutex_unlock(&dev->struct_mutex);
1789 
1790 	return cnt;
1791 }
1792 
1793 static const struct file_operations i915_ring_stop_fops = {
1794 	.owner = THIS_MODULE,
1795 	.open = simple_open,
1796 	.read = i915_ring_stop_read,
1797 	.write = i915_ring_stop_write,
1798 	.llseek = default_llseek,
1799 };
1800 
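/*
 * i915_gem_drop_caches: writing a mask of the DROP_* flags below trims
 * the corresponding GEM state; reading reports DROP_ALL (0xf) so
 * scripts can discover the full mask.  A usage sketch, assuming the
 * usual debugfs mount point:
 *
 *   # echo 0xf > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */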
1801 #define DROP_UNBOUND 0x1
1802 #define DROP_BOUND 0x2
1803 #define DROP_RETIRE 0x4
1804 #define DROP_ACTIVE 0x8
1805 #define DROP_ALL (DROP_UNBOUND | \
1806 		  DROP_BOUND | \
1807 		  DROP_RETIRE | \
1808 		  DROP_ACTIVE)
1809 static ssize_t
1810 i915_drop_caches_read(struct file *filp,
1811 		      char __user *ubuf,
1812 		      size_t max,
1813 		      loff_t *ppos)
1814 {
1815 	char buf[20];
1816 	int len;
1817 
1818 	len = scnprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
1821 
1822 	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1823 }
1824 
1825 static ssize_t
1826 i915_drop_caches_write(struct file *filp,
1827 		       const char __user *ubuf,
1828 		       size_t cnt,
1829 		       loff_t *ppos)
1830 {
1831 	struct drm_device *dev = filp->private_data;
1832 	struct drm_i915_private *dev_priv = dev->dev_private;
1833 	struct drm_i915_gem_object *obj, *next;
1834 	char buf[20];
1835 	int val = 0, ret;
1836 
1837 	if (cnt > 0) {
1838 		if (cnt > sizeof(buf) - 1)
1839 			return -EINVAL;
1840 
1841 		if (copy_from_user(buf, ubuf, cnt))
1842 			return -EFAULT;
1843 		buf[cnt] = 0;
1844 
1845 		val = simple_strtoul(buf, NULL, 0);
1846 	}
1847 
1848 	DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
1849 
1850 	/* No need to check and wait for gpu resets; the -EAGAIN protocol
1851 	 * only helps ioctl callers, since only libdrm auto-restarts ioctls. */
1852 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1853 	if (ret)
1854 		return ret;
1855 
1856 	if (val & DROP_ACTIVE) {
1857 		ret = i915_gpu_idle(dev);
1858 		if (ret)
1859 			goto unlock;
1860 	}
1861 
1862 	if (val & (DROP_RETIRE | DROP_ACTIVE))
1863 		i915_gem_retire_requests(dev);
1864 
1865 	if (val & DROP_BOUND) {
1866 		list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
1867 			if (obj->pin_count == 0) {
1868 				ret = i915_gem_object_unbind(obj);
1869 				if (ret)
1870 					goto unlock;
1871 			}
1872 	}
1873 
1874 	if (val & DROP_UNBOUND) {
1875 		list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
1876 			if (obj->pages_pin_count == 0) {
1877 				ret = i915_gem_object_put_pages(obj);
1878 				if (ret)
1879 					goto unlock;
1880 			}
1881 	}
1882 
1883 unlock:
1884 	mutex_unlock(&dev->struct_mutex);
1885 
1886 	return ret ?: cnt;
1887 }
1888 
1889 static const struct file_operations i915_drop_caches_fops = {
1890 	.owner = THIS_MODULE,
1891 	.open = simple_open,
1892 	.read = i915_drop_caches_read,
1893 	.write = i915_drop_caches_write,
1894 	.llseek = default_llseek,
1895 };
1896 
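/*
 * i915_max_freq: rps.max_delay is kept in hardware units of
 * GT_FREQUENCY_MULTIPLIER MHz, so reads scale it up to MHz and writes
 * divide back down, truncating the value to a multiple of the
 * multiplier.
 */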
1897 static ssize_t
1898 i915_max_freq_read(struct file *filp,
1899 		   char __user *ubuf,
1900 		   size_t max,
1901 		   loff_t *ppos)
1902 {
1903 	struct drm_device *dev = filp->private_data;
1904 	struct drm_i915_private *dev_priv = dev->dev_private;
1905 	char buf[80];
1906 	int len, ret;
1907 
1908 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1909 		return -ENODEV;
1910 
1911 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1912 	if (ret)
1913 		return ret;
1914 
1915 	len = scnprintf(buf, sizeof(buf),
1916 		       "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
1917 	mutex_unlock(&dev_priv->rps.hw_lock);
1921 
1922 	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1923 }
1924 
1925 static ssize_t
1926 i915_max_freq_write(struct file *filp,
1927 		    const char __user *ubuf,
1928 		    size_t cnt,
1929 		    loff_t *ppos)
1930 {
1931 	struct drm_device *dev = filp->private_data;
1932 	struct drm_i915_private *dev_priv = dev->dev_private;
1933 	char buf[20];
1934 	int val = 1, ret;
1935 
1936 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1937 		return -ENODEV;
1938 
1939 	if (cnt > 0) {
1940 		if (cnt > sizeof(buf) - 1)
1941 			return -EINVAL;
1942 
1943 		if (copy_from_user(buf, ubuf, cnt))
1944 			return -EFAULT;
1945 		buf[cnt] = 0;
1946 
1947 		val = simple_strtoul(buf, NULL, 0);
1948 	}
1949 
1950 	DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
1951 
1952 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1953 	if (ret)
1954 		return ret;
1955 
1956 	/*
1957 	 * Turbo will still be enabled, but won't go above the set value.
1958 	 */
1959 	dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
1960 
1961 	gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
1962 	mutex_unlock(&dev_priv->rps.hw_lock);
1963 
1964 	return cnt;
1965 }
1966 
1967 static const struct file_operations i915_max_freq_fops = {
1968 	.owner = THIS_MODULE,
1969 	.open = simple_open,
1970 	.read = i915_max_freq_read,
1971 	.write = i915_max_freq_write,
1972 	.llseek = default_llseek,
1973 };
1974 
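/* i915_min_freq: the floor counterpart of i915_max_freq above; the same
 * MHz <-> GT_FREQUENCY_MULTIPLIER conversion applies.
 */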
1975 static ssize_t
1976 i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
1977 		   loff_t *ppos)
1978 {
1979 	struct drm_device *dev = filp->private_data;
1980 	struct drm_i915_private *dev_priv = dev->dev_private;
1981 	char buf[80];
1982 	int len, ret;
1983 
1984 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1985 		return -ENODEV;
1986 
1987 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1988 	if (ret)
1989 		return ret;
1990 
1991 	len = scnprintf(buf, sizeof(buf),
1992 		       "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
1993 	mutex_unlock(&dev_priv->rps.hw_lock);
1997 
1998 	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1999 }
2000 
2001 static ssize_t
2002 i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
2003 		    loff_t *ppos)
2004 {
2005 	struct drm_device *dev = filp->private_data;
2006 	struct drm_i915_private *dev_priv = dev->dev_private;
2007 	char buf[20];
2008 	int val = 1, ret;
2009 
2010 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2011 		return -ENODEV;
2012 
2013 	if (cnt > 0) {
2014 		if (cnt > sizeof(buf) - 1)
2015 			return -EINVAL;
2016 
2017 		if (copy_from_user(buf, ubuf, cnt))
2018 			return -EFAULT;
2019 		buf[cnt] = 0;
2020 
2021 		val = simple_strtoul(buf, NULL, 0);
2022 	}
2023 
2024 	DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
2025 
2026 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
2027 	if (ret)
2028 		return ret;
2029 
2030 	/*
2031 	 * Turbo will still be enabled, but won't go below the set value.
2032 	 */
2033 	dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
2034 
2035 	gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
2036 	mutex_unlock(&dev_priv->rps.hw_lock);
2037 
2038 	return cnt;
2039 }
2040 
2041 static const struct file_operations i915_min_freq_fops = {
2042 	.owner = THIS_MODULE,
2043 	.open = simple_open,
2044 	.read = i915_min_freq_read,
2045 	.write = i915_min_freq_write,
2046 	.llseek = default_llseek,
2047 };
2048 
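/*
 * i915_cache_sharing: exposes the snoop policy field of
 * GEN6_MBCUNIT_SNPCR.  Only the values 0-3 map onto hardware settings,
 * so writes reject anything else.
 */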
2049 static ssize_t
2050 i915_cache_sharing_read(struct file *filp,
2051 			char __user *ubuf,
2052 			size_t max,
2053 			loff_t *ppos)
2054 {
2055 	struct drm_device *dev = filp->private_data;
2056 	struct drm_i915_private *dev_priv = dev->dev_private;
2057 	char buf[80];
2058 	u32 snpcr;
2059 	int len, ret;
2060 
2061 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2062 		return -ENODEV;
2063 
2064 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2065 	if (ret)
2066 		return ret;
2067 
2068 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2069 	mutex_unlock(&dev->struct_mutex);
2070 
2071 	len = scnprintf(buf, sizeof(buf),
2072 		       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
2073 		       GEN6_MBC_SNPCR_SHIFT);
2077 
2078 	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
2079 }
2080 
2081 static ssize_t
2082 i915_cache_sharing_write(struct file *filp,
2083 			 const char __user *ubuf,
2084 			 size_t cnt,
2085 			 loff_t *ppos)
2086 {
2087 	struct drm_device *dev = filp->private_data;
2088 	struct drm_i915_private *dev_priv = dev->dev_private;
2089 	char buf[20];
2090 	u32 snpcr;
2091 	int val = 1;
2092 
2093 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
2094 		return -ENODEV;
2095 
2096 	if (cnt > 0) {
2097 		if (cnt > sizeof(buf) - 1)
2098 			return -EINVAL;
2099 
2100 		if (copy_from_user(buf, ubuf, cnt))
2101 			return -EFAULT;
2102 		buf[cnt] = 0;
2103 
2104 		val = simple_strtoul(buf, NULL, 0);
2105 	}
2106 
2107 	if (val < 0 || val > 3)
2108 		return -EINVAL;
2109 
2110 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %d\n", val);
2111 
2112 	/* Update the cache sharing policy here as well */
2113 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
2114 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
2115 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
2116 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
2117 
2118 	return cnt;
2119 }
2120 
2121 static const struct file_operations i915_cache_sharing_fops = {
2122 	.owner = THIS_MODULE,
2123 	.open = simple_open,
2124 	.read = i915_cache_sharing_read,
2125 	.write = i915_cache_sharing_write,
2126 	.llseek = default_llseek,
2127 };
2128 
2129 /* As the drm_debugfs_init() routines are called before dev->dev_private is
2130  * allocated, we need to hook into the minor for release. */
2131 static int
2132 drm_add_fake_info_node(struct drm_minor *minor,
2133 		       struct dentry *ent,
2134 		       const void *key)
2135 {
2136 	struct drm_info_node *node;
2137 
2138 	node = kmalloc(sizeof(*node), GFP_KERNEL);
2139 	if (node == NULL) {
2140 		debugfs_remove(ent);
2141 		return -ENOMEM;
2142 	}
2143 
2144 	node->minor = minor;
2145 	node->dent = ent;
2146 	node->info_ent = (void *) key;
2147 
2148 	mutex_lock(&minor->debugfs_lock);
2149 	list_add(&node->list, &minor->debugfs_list);
2150 	mutex_unlock(&minor->debugfs_lock);
2151 
2152 	return 0;
2153 }
2154 
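/*
 * i915_forcewake_user: holding this file open pins a forcewake
 * reference so the GT stays awake for manual register inspection;
 * closing it drops the reference again.  Generations before gen6 have
 * no forcewake, so open and release are no-ops there.
 */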
2155 static int i915_forcewake_open(struct inode *inode, struct file *file)
2156 {
2157 	struct drm_device *dev = inode->i_private;
2158 	struct drm_i915_private *dev_priv = dev->dev_private;
2159 
2160 	if (INTEL_INFO(dev)->gen < 6)
2161 		return 0;
2162 
2163 	gen6_gt_force_wake_get(dev_priv);
2164 
2165 	return 0;
2166 }
2167 
2168 static int i915_forcewake_release(struct inode *inode, struct file *file)
2169 {
2170 	struct drm_device *dev = inode->i_private;
2171 	struct drm_i915_private *dev_priv = dev->dev_private;
2172 
2173 	if (INTEL_INFO(dev)->gen < 6)
2174 		return 0;
2175 
2176 	gen6_gt_force_wake_put(dev_priv);
2177 
2178 	return 0;
2179 }
2180 
2181 static const struct file_operations i915_forcewake_fops = {
2182 	.owner = THIS_MODULE,
2183 	.open = i915_forcewake_open,
2184 	.release = i915_forcewake_release,
2185 };
2186 
2187 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
2188 {
2189 	struct drm_device *dev = minor->dev;
2190 	struct dentry *ent;
2191 
2192 	ent = debugfs_create_file("i915_forcewake_user",
2193 				  S_IRUSR,
2194 				  root, dev,
2195 				  &i915_forcewake_fops);
2196 	if (IS_ERR_OR_NULL(ent))
2197 		return ent ? PTR_ERR(ent) : -ENOMEM;
2198 
2199 	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
2200 }
2201 
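/* Like i915_forcewake_create(), but for the writable tunables: the name
 * comes from the caller and the file is user-writable (0644-style
 * S_IRUGO | S_IWUSR permissions).
 */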
2202 static int i915_debugfs_create(struct dentry *root,
2203 			       struct drm_minor *minor,
2204 			       const char *name,
2205 			       const struct file_operations *fops)
2206 {
2207 	struct drm_device *dev = minor->dev;
2208 	struct dentry *ent;
2209 
2210 	ent = debugfs_create_file(name,
2211 				  S_IRUGO | S_IWUSR,
2212 				  root, dev,
2213 				  fops);
2214 	if (IS_ERR_OR_NULL(ent))
2215 		return ent ? PTR_ERR(ent) : -ENOMEM;
2216 
2217 	return drm_add_fake_info_node(minor, ent, fops);
2218 }
2219 
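/* Read-only info nodes, registered in bulk by i915_debugfs_init(). */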
2220 static struct drm_info_list i915_debugfs_list[] = {
2221 	{"i915_capabilities", i915_capabilities, 0},
2222 	{"i915_gem_objects", i915_gem_object_info, 0},
2223 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
2224 	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
2225 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
2226 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
2227 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
2228 	{"i915_gem_request", i915_gem_request_info, 0},
2229 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
2230 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
2231 	{"i915_gem_interrupt", i915_interrupt_info, 0},
2232 	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
2233 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
2234 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
2235 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
2236 	{"i915_cur_delayinfo", i915_cur_delayinfo, 0},
2237 	{"i915_delayfreq_table", i915_delayfreq_table, 0},
2238 	{"i915_inttoext_table", i915_inttoext_table, 0},
2239 	{"i915_drpc_info", i915_drpc_info, 0},
2240 	{"i915_emon_status", i915_emon_status, 0},
2241 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
2242 	{"i915_gfxec", i915_gfxec, 0},
2243 	{"i915_fbc_status", i915_fbc_status, 0},
2244 	{"i915_sr_status", i915_sr_status, 0},
2245 	{"i915_opregion", i915_opregion, 0},
2246 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
2247 	{"i915_context_status", i915_context_status, 0},
2248 	{"i915_gen6_forcewake_count", i915_gen6_forcewake_count_info, 0},
2249 	{"i915_swizzle_info", i915_swizzle_info, 0},
2250 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
2251 	{"i915_dpio", i915_dpio_info, 0},
2252 };
2253 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
2254 
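/* Create the writable tunables one by one, then the info nodes in bulk. */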
2255 int i915_debugfs_init(struct drm_minor *minor)
2256 {
2257 	int ret;
2258 
2259 	ret = i915_debugfs_create(minor->debugfs_root, minor,
2260 				  "i915_wedged",
2261 				  &i915_wedged_fops);
2262 	if (ret)
2263 		return ret;
2264 
2265 	ret = i915_forcewake_create(minor->debugfs_root, minor);
2266 	if (ret)
2267 		return ret;
2268 
2269 	ret = i915_debugfs_create(minor->debugfs_root, minor,
2270 				  "i915_max_freq",
2271 				  &i915_max_freq_fops);
2272 	if (ret)
2273 		return ret;
2274 
2275 	ret = i915_debugfs_create(minor->debugfs_root, minor,
2276 				  "i915_min_freq",
2277 				  &i915_min_freq_fops);
2278 	if (ret)
2279 		return ret;
2280 
2281 	ret = i915_debugfs_create(minor->debugfs_root, minor,
2282 				  "i915_cache_sharing",
2283 				  &i915_cache_sharing_fops);
2284 	if (ret)
2285 		return ret;
2286 
2287 	ret = i915_debugfs_create(minor->debugfs_root, minor,
2288 				  "i915_ring_stop",
2289 				  &i915_ring_stop_fops);
2290 	if (ret)
2291 		return ret;
2292 
2293 	ret = i915_debugfs_create(minor->debugfs_root, minor,
2294 				  "i915_gem_drop_caches",
2295 				  &i915_drop_caches_fops);
2296 	if (ret)
2297 		return ret;
2298 
2299 	ret = i915_debugfs_create(minor->debugfs_root, minor,
2300 				  "i915_error_state",
2301 				  &i915_error_state_fops);
2302 	if (ret)
2303 		return ret;
2304 
2305 	ret = i915_debugfs_create(minor->debugfs_root, minor,
2306 				 "i915_next_seqno",
2307 				 &i915_next_seqno_fops);
2308 	if (ret)
2309 		return ret;
2310 
2311 	return drm_debugfs_create_files(i915_debugfs_list,
2312 					I915_DEBUGFS_ENTRIES,
2313 					minor->debugfs_root, minor);
2314 }
2315 
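/*
 * The casts below are deliberate: drm_add_fake_info_node() stored each
 * fops pointer as the node's info_ent, so handing the same pointer to
 * drm_debugfs_remove_files() identifies the fake node to remove.
 */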
2316 void i915_debugfs_cleanup(struct drm_minor *minor)
2317 {
2318 	drm_debugfs_remove_files(i915_debugfs_list,
2319 				 I915_DEBUGFS_ENTRIES, minor);
2320 	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
2321 				 1, minor);
2322 	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
2323 				 1, minor);
2324 	drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
2325 				 1, minor);
2326 	drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
2327 				 1, minor);
2328 	drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
2329 				 1, minor);
2330 	drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
2331 				 1, minor);
2332 	drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
2333 				 1, minor);
2334 	drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
2335 				 1, minor);
2336 	drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
2337 				 1, minor);
2338 }
2339 
2340 #endif /* CONFIG_DEBUG_FS */
2341