1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28 
29 #include <linux/seq_file.h>
30 #include <linux/circ_buf.h>
31 #include <linux/ctype.h>
32 #include <linux/debugfs.h>
33 #include <linux/slab.h>
34 #include <linux/export.h>
35 #include <linux/list_sort.h>
36 #include <asm/msr-index.h>
37 #include <drm/drmP.h>
38 #include "intel_drv.h"
39 #include "intel_ringbuffer.h"
40 #include <drm/i915_drm.h>
41 #include "i915_drv.h"
42 
43 enum {
44 	ACTIVE_LIST,
45 	INACTIVE_LIST,
46 	PINNED_LIST,
47 };
48 
49 static const char *yesno(int v)
50 {
51 	return v ? "yes" : "no";
52 }
53 
54 /* As the drm_debugfs_init() routines are called before dev->dev_private is
55  * allocated we need to hook into the minor for release. */
56 static int
57 drm_add_fake_info_node(struct drm_minor *minor,
58 		       struct dentry *ent,
59 		       const void *key)
60 {
61 	struct drm_info_node *node;
62 
63 	node = kmalloc(sizeof(*node), GFP_KERNEL);
64 	if (node == NULL) {
65 		debugfs_remove(ent);
66 		return -ENOMEM;
67 	}
68 
69 	node->minor = minor;
70 	node->dent = ent;
71 	node->info_ent = (void *) key;
72 
73 	mutex_lock(&minor->debugfs_lock);
74 	list_add(&node->list, &minor->debugfs_list);
75 	mutex_unlock(&minor->debugfs_lock);
76 
77 	return 0;
78 }
79 
80 static int i915_capabilities(struct seq_file *m, void *data)
81 {
82 	struct drm_info_node *node = m->private;
83 	struct drm_device *dev = node->minor->dev;
84 	const struct intel_device_info *info = INTEL_INFO(dev);
85 
86 	seq_printf(m, "gen: %d\n", info->gen);
87 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
88 #define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
89 #define SEP_SEMICOLON ;
90 	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_SEMICOLON);
91 #undef PRINT_FLAG
92 #undef SEP_SEMICOLON
93 
94 	return 0;
95 }
96 
97 static const char *get_pin_flag(struct drm_i915_gem_object *obj)
98 {
99 	if (obj->pin_display)
100 		return "p";
101 	else
102 		return " ";
103 }
104 
105 static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
106 {
107 	switch (obj->tiling_mode) {
108 	default:
109 	case I915_TILING_NONE: return " ";
110 	case I915_TILING_X: return "X";
111 	case I915_TILING_Y: return "Y";
112 	}
113 }
114 
115 static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
116 {
117 	return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
118 }
119 
120 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
121 {
122 	u64 size = 0;
123 	struct i915_vma *vma;
124 
125 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
126 		if (i915_is_ggtt(vma->vm) &&
127 		    drm_mm_node_allocated(&vma->node))
128 			size += vma->node.size;
129 	}
130 
131 	return size;
132 }
133 
134 static void
135 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
136 {
137 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
138 	struct intel_engine_cs *ring;
139 	struct i915_vma *vma;
140 	int pin_count = 0;
141 	int i;
142 
143 	seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
144 		   &obj->base,
145 		   obj->active ? "*" : " ",
146 		   get_pin_flag(obj),
147 		   get_tiling_flag(obj),
148 		   get_global_flag(obj),
149 		   obj->base.size / 1024,
150 		   obj->base.read_domains,
151 		   obj->base.write_domain);
152 	for_each_ring(ring, dev_priv, i)
153 		seq_printf(m, "%x ",
154 				i915_gem_request_get_seqno(obj->last_read_req[i]));
155 	seq_printf(m, "] %x %x%s%s%s",
156 		   i915_gem_request_get_seqno(obj->last_write_req),
157 		   i915_gem_request_get_seqno(obj->last_fenced_req),
158 		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
159 		   obj->dirty ? " dirty" : "",
160 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
161 	if (obj->base.name)
162 		seq_printf(m, " (name: %d)", obj->base.name);
163 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
164 		if (vma->pin_count > 0)
165 			pin_count++;
166 	}
167 	seq_printf(m, " (pinned x %d)", pin_count);
168 	if (obj->pin_display)
169 		seq_printf(m, " (display)");
170 	if (obj->fence_reg != I915_FENCE_REG_NONE)
171 		seq_printf(m, " (fence: %d)", obj->fence_reg);
172 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
173 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
174 			   i915_is_ggtt(vma->vm) ? "g" : "pp",
175 			   vma->node.start, vma->node.size);
176 		if (i915_is_ggtt(vma->vm))
177 			seq_printf(m, ", type: %u)", vma->ggtt_view.type);
178 		else
179 			seq_puts(m, ")");
180 	}
181 	if (obj->stolen)
182 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
183 	if (obj->pin_display || obj->fault_mappable) {
184 		char s[3], *t = s;
185 		if (obj->pin_display)
186 			*t++ = 'p';
187 		if (obj->fault_mappable)
188 			*t++ = 'f';
189 		*t = '\0';
190 		seq_printf(m, " (%s mappable)", s);
191 	}
192 	if (obj->last_write_req != NULL)
193 		seq_printf(m, " (%s)",
194 			   i915_gem_request_get_ring(obj->last_write_req)->name);
195 	if (obj->frontbuffer_bits)
196 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
197 }
198 
199 static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
200 {
201 	seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
202 	seq_putc(m, ctx->remap_slice ? 'R' : 'r');
203 	seq_putc(m, ' ');
204 }
205 
206 static int i915_gem_object_list_info(struct seq_file *m, void *data)
207 {
208 	struct drm_info_node *node = m->private;
209 	uintptr_t list = (uintptr_t) node->info_ent->data;
210 	struct list_head *head;
211 	struct drm_device *dev = node->minor->dev;
212 	struct drm_i915_private *dev_priv = dev->dev_private;
213 	struct i915_address_space *vm = &dev_priv->gtt.base;
214 	struct i915_vma *vma;
215 	u64 total_obj_size, total_gtt_size;
216 	int count, ret;
217 
218 	ret = mutex_lock_interruptible(&dev->struct_mutex);
219 	if (ret)
220 		return ret;
221 
222 	/* FIXME: the user of this interface might want more than just GGTT */
223 	switch (list) {
224 	case ACTIVE_LIST:
225 		seq_puts(m, "Active:\n");
226 		head = &vm->active_list;
227 		break;
228 	case INACTIVE_LIST:
229 		seq_puts(m, "Inactive:\n");
230 		head = &vm->inactive_list;
231 		break;
232 	default:
233 		mutex_unlock(&dev->struct_mutex);
234 		return -EINVAL;
235 	}
236 
237 	total_obj_size = total_gtt_size = count = 0;
238 	list_for_each_entry(vma, head, mm_list) {
239 		seq_printf(m, "   ");
240 		describe_obj(m, vma->obj);
241 		seq_printf(m, "\n");
242 		total_obj_size += vma->obj->base.size;
243 		total_gtt_size += vma->node.size;
244 		count++;
245 	}
246 	mutex_unlock(&dev->struct_mutex);
247 
248 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
249 		   count, total_obj_size, total_gtt_size);
250 	return 0;
251 }
252 
253 static int obj_rank_by_stolen(void *priv,
254 			      struct list_head *A, struct list_head *B)
255 {
256 	struct drm_i915_gem_object *a =
257 		container_of(A, struct drm_i915_gem_object, obj_exec_link);
258 	struct drm_i915_gem_object *b =
259 		container_of(B, struct drm_i915_gem_object, obj_exec_link);
260 
261 	return a->stolen->start - b->stolen->start;
262 }
263 
264 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
265 {
266 	struct drm_info_node *node = m->private;
267 	struct drm_device *dev = node->minor->dev;
268 	struct drm_i915_private *dev_priv = dev->dev_private;
269 	struct drm_i915_gem_object *obj;
270 	u64 total_obj_size, total_gtt_size;
271 	LIST_HEAD(stolen);
272 	int count, ret;
273 
274 	ret = mutex_lock_interruptible(&dev->struct_mutex);
275 	if (ret)
276 		return ret;
277 
278 	total_obj_size = total_gtt_size = count = 0;
279 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
280 		if (obj->stolen == NULL)
281 			continue;
282 
283 		list_add(&obj->obj_exec_link, &stolen);
284 
285 		total_obj_size += obj->base.size;
286 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
287 		count++;
288 	}
289 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
290 		if (obj->stolen == NULL)
291 			continue;
292 
293 		list_add(&obj->obj_exec_link, &stolen);
294 
295 		total_obj_size += obj->base.size;
296 		count++;
297 	}
298 	list_sort(NULL, &stolen, obj_rank_by_stolen);
299 	seq_puts(m, "Stolen:\n");
300 	while (!list_empty(&stolen)) {
301 		obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
302 		seq_puts(m, "   ");
303 		describe_obj(m, obj);
304 		seq_putc(m, '\n');
305 		list_del_init(&obj->obj_exec_link);
306 	}
307 	mutex_unlock(&dev->struct_mutex);
308 
309 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
310 		   count, total_obj_size, total_gtt_size);
311 	return 0;
312 }
313 
314 #define count_objects(list, member) do { \
315 	list_for_each_entry(obj, list, member) { \
316 		size += i915_gem_obj_total_ggtt_size(obj); \
317 		++count; \
318 		if (obj->map_and_fenceable) { \
319 			mappable_size += i915_gem_obj_ggtt_size(obj); \
320 			++mappable_count; \
321 		} \
322 	} \
323 } while (0)
324 
325 struct file_stats {
326 	struct drm_i915_file_private *file_priv;
327 	unsigned long count;
328 	u64 total, unbound;
329 	u64 global, shared;
330 	u64 active, inactive;
331 };
332 
333 static int per_file_stats(int id, void *ptr, void *data)
334 {
335 	struct drm_i915_gem_object *obj = ptr;
336 	struct file_stats *stats = data;
337 	struct i915_vma *vma;
338 
339 	stats->count++;
340 	stats->total += obj->base.size;
341 
342 	if (obj->base.name || obj->base.dma_buf)
343 		stats->shared += obj->base.size;
344 
345 	if (USES_FULL_PPGTT(obj->base.dev)) {
346 		list_for_each_entry(vma, &obj->vma_list, vma_link) {
347 			struct i915_hw_ppgtt *ppgtt;
348 
349 			if (!drm_mm_node_allocated(&vma->node))
350 				continue;
351 
352 			if (i915_is_ggtt(vma->vm)) {
353 				stats->global += obj->base.size;
354 				continue;
355 			}
356 
357 			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
358 			if (ppgtt->file_priv != stats->file_priv)
359 				continue;
360 
361 			if (obj->active) /* XXX per-vma statistic */
362 				stats->active += obj->base.size;
363 			else
364 				stats->inactive += obj->base.size;
365 
366 			return 0;
367 		}
368 	} else {
369 		if (i915_gem_obj_ggtt_bound(obj)) {
370 			stats->global += obj->base.size;
371 			if (obj->active)
372 				stats->active += obj->base.size;
373 			else
374 				stats->inactive += obj->base.size;
375 			return 0;
376 		}
377 	}
378 
379 	if (!list_empty(&obj->global_list))
380 		stats->unbound += obj->base.size;
381 
382 	return 0;
383 }
384 
385 #define print_file_stats(m, name, stats) do { \
386 	if (stats.count) \
387 		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
388 			   name, \
389 			   stats.count, \
390 			   stats.total, \
391 			   stats.active, \
392 			   stats.inactive, \
393 			   stats.global, \
394 			   stats.shared, \
395 			   stats.unbound); \
396 } while (0)
397 
398 static void print_batch_pool_stats(struct seq_file *m,
399 				   struct drm_i915_private *dev_priv)
400 {
401 	struct drm_i915_gem_object *obj;
402 	struct file_stats stats;
403 	struct intel_engine_cs *ring;
404 	int i, j;
405 
406 	memset(&stats, 0, sizeof(stats));
407 
408 	for_each_ring(ring, dev_priv, i) {
409 		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
410 			list_for_each_entry(obj,
411 					    &ring->batch_pool.cache_list[j],
412 					    batch_pool_link)
413 				per_file_stats(0, obj, &stats);
414 		}
415 	}
416 
417 	print_file_stats(m, "[k]batch pool", stats);
418 }
419 
420 #define count_vmas(list, member) do { \
421 	list_for_each_entry(vma, list, member) { \
422 		size += i915_gem_obj_total_ggtt_size(vma->obj); \
423 		++count; \
424 		if (vma->obj->map_and_fenceable) { \
425 			mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
426 			++mappable_count; \
427 		} \
428 	} \
429 } while (0)
430 
431 static int i915_gem_object_info(struct seq_file *m, void* data)
432 {
433 	struct drm_info_node *node = m->private;
434 	struct drm_device *dev = node->minor->dev;
435 	struct drm_i915_private *dev_priv = dev->dev_private;
436 	u32 count, mappable_count, purgeable_count;
437 	u64 size, mappable_size, purgeable_size;
438 	struct drm_i915_gem_object *obj;
439 	struct i915_address_space *vm = &dev_priv->gtt.base;
440 	struct drm_file *file;
441 	struct i915_vma *vma;
442 	int ret;
443 
444 	ret = mutex_lock_interruptible(&dev->struct_mutex);
445 	if (ret)
446 		return ret;
447 
448 	seq_printf(m, "%u objects, %zu bytes\n",
449 		   dev_priv->mm.object_count,
450 		   dev_priv->mm.object_memory);
451 
452 	size = count = mappable_size = mappable_count = 0;
453 	count_objects(&dev_priv->mm.bound_list, global_list);
454 	seq_printf(m, "%u [%u] objects, %llu [%llu] bytes in gtt\n",
455 		   count, mappable_count, size, mappable_size);
456 
457 	size = count = mappable_size = mappable_count = 0;
458 	count_vmas(&vm->active_list, mm_list);
459 	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
460 		   count, mappable_count, size, mappable_size);
461 
462 	size = count = mappable_size = mappable_count = 0;
463 	count_vmas(&vm->inactive_list, mm_list);
464 	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
465 		   count, mappable_count, size, mappable_size);
466 
467 	size = count = purgeable_size = purgeable_count = 0;
468 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
469 		size += obj->base.size, ++count;
470 		if (obj->madv == I915_MADV_DONTNEED)
471 			purgeable_size += obj->base.size, ++purgeable_count;
472 	}
473 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
474 
475 	size = count = mappable_size = mappable_count = 0;
476 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
477 		if (obj->fault_mappable) {
478 			size += i915_gem_obj_ggtt_size(obj);
479 			++count;
480 		}
481 		if (obj->pin_display) {
482 			mappable_size += i915_gem_obj_ggtt_size(obj);
483 			++mappable_count;
484 		}
485 		if (obj->madv == I915_MADV_DONTNEED) {
486 			purgeable_size += obj->base.size;
487 			++purgeable_count;
488 		}
489 	}
490 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
491 		   purgeable_count, purgeable_size);
492 	seq_printf(m, "%u pinned mappable objects, %llu bytes\n",
493 		   mappable_count, mappable_size);
494 	seq_printf(m, "%u fault mappable objects, %llu bytes\n",
495 		   count, size);
496 
497 	seq_printf(m, "%llu [%llu] gtt total\n",
498 		   dev_priv->gtt.base.total,
499 		   (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
500 
501 	seq_putc(m, '\n');
502 	print_batch_pool_stats(m, dev_priv);
503 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
504 		struct file_stats stats;
505 		struct task_struct *task;
506 
507 		memset(&stats, 0, sizeof(stats));
508 		stats.file_priv = file->driver_priv;
509 		spin_lock(&file->table_lock);
510 		idr_for_each(&file->object_idr, per_file_stats, &stats);
511 		spin_unlock(&file->table_lock);
512 		/*
513 		 * Although we have a valid reference on file->pid, that does
514 		 * not guarantee that the task_struct who called get_pid() is
515 		 * still alive (e.g. get_pid(current) => fork() => exit()).
516 		 * Therefore, we need to protect this ->comm access using RCU.
517 		 */
518 		rcu_read_lock();
519 		task = pid_task(file->pid, PIDTYPE_PID);
520 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
521 		rcu_read_unlock();
522 	}
523 
524 	mutex_unlock(&dev->struct_mutex);
525 
526 	return 0;
527 }
528 
529 static int i915_gem_gtt_info(struct seq_file *m, void *data)
530 {
531 	struct drm_info_node *node = m->private;
532 	struct drm_device *dev = node->minor->dev;
533 	uintptr_t list = (uintptr_t) node->info_ent->data;
534 	struct drm_i915_private *dev_priv = dev->dev_private;
535 	struct drm_i915_gem_object *obj;
536 	u64 total_obj_size, total_gtt_size;
537 	int count, ret;
538 
539 	ret = mutex_lock_interruptible(&dev->struct_mutex);
540 	if (ret)
541 		return ret;
542 
543 	total_obj_size = total_gtt_size = count = 0;
544 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
545 		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
546 			continue;
547 
548 		seq_puts(m, "   ");
549 		describe_obj(m, obj);
550 		seq_putc(m, '\n');
551 		total_obj_size += obj->base.size;
552 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
553 		count++;
554 	}
555 
556 	mutex_unlock(&dev->struct_mutex);
557 
558 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
559 		   count, total_obj_size, total_gtt_size);
560 
561 	return 0;
562 }
563 
564 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
565 {
566 	struct drm_info_node *node = m->private;
567 	struct drm_device *dev = node->minor->dev;
568 	struct drm_i915_private *dev_priv = dev->dev_private;
569 	struct intel_crtc *crtc;
570 	int ret;
571 
572 	ret = mutex_lock_interruptible(&dev->struct_mutex);
573 	if (ret)
574 		return ret;
575 
576 	for_each_intel_crtc(dev, crtc) {
577 		const char pipe = pipe_name(crtc->pipe);
578 		const char plane = plane_name(crtc->plane);
579 		struct intel_unpin_work *work;
580 
581 		spin_lock_irq(&dev->event_lock);
582 		work = crtc->unpin_work;
583 		if (work == NULL) {
584 			seq_printf(m, "No flip due on pipe %c (plane %c)\n",
585 				   pipe, plane);
586 		} else {
587 			u32 addr;
588 
589 			if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
590 				seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
591 					   pipe, plane);
592 			} else {
593 				seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
594 					   pipe, plane);
595 			}
596 			if (work->flip_queued_req) {
597 				struct intel_engine_cs *ring =
598 					i915_gem_request_get_ring(work->flip_queued_req);
599 
600 				seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
601 					   ring->name,
602 					   i915_gem_request_get_seqno(work->flip_queued_req),
603 					   dev_priv->next_seqno,
604 					   ring->get_seqno(ring, true),
605 					   i915_gem_request_completed(work->flip_queued_req, true));
606 			} else
607 				seq_printf(m, "Flip not associated with any ring\n");
608 			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
609 				   work->flip_queued_vblank,
610 				   work->flip_ready_vblank,
611 				   drm_crtc_vblank_count(&crtc->base));
612 			if (work->enable_stall_check)
613 				seq_puts(m, "Stall check enabled, ");
614 			else
615 				seq_puts(m, "Stall check waiting for page flip ioctl, ");
616 			seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
617 
618 			if (INTEL_INFO(dev)->gen >= 4)
619 				addr = I915_HI_DISPBASE(I915_READ(DSPSURF(crtc->plane)));
620 			else
621 				addr = I915_READ(DSPADDR(crtc->plane));
622 			seq_printf(m, "Current scanout address 0x%08x\n", addr);
623 
624 			if (work->pending_flip_obj) {
625 				seq_printf(m, "New framebuffer address 0x%08lx\n", (long)work->gtt_offset);
626 				seq_printf(m, "MMIO update completed? %d\n",  addr == work->gtt_offset);
627 			}
628 		}
629 		spin_unlock_irq(&dev->event_lock);
630 	}
631 
632 	mutex_unlock(&dev->struct_mutex);
633 
634 	return 0;
635 }
636 
637 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
638 {
639 	struct drm_info_node *node = m->private;
640 	struct drm_device *dev = node->minor->dev;
641 	struct drm_i915_private *dev_priv = dev->dev_private;
642 	struct drm_i915_gem_object *obj;
643 	struct intel_engine_cs *ring;
644 	int total = 0;
645 	int ret, i, j;
646 
647 	ret = mutex_lock_interruptible(&dev->struct_mutex);
648 	if (ret)
649 		return ret;
650 
651 	for_each_ring(ring, dev_priv, i) {
652 		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
653 			int count;
654 
655 			count = 0;
656 			list_for_each_entry(obj,
657 					    &ring->batch_pool.cache_list[j],
658 					    batch_pool_link)
659 				count++;
660 			seq_printf(m, "%s cache[%d]: %d objects\n",
661 				   ring->name, j, count);
662 
663 			list_for_each_entry(obj,
664 					    &ring->batch_pool.cache_list[j],
665 					    batch_pool_link) {
666 				seq_puts(m, "   ");
667 				describe_obj(m, obj);
668 				seq_putc(m, '\n');
669 			}
670 
671 			total += count;
672 		}
673 	}
674 
675 	seq_printf(m, "total: %d\n", total);
676 
677 	mutex_unlock(&dev->struct_mutex);
678 
679 	return 0;
680 }
681 
682 static int i915_gem_request_info(struct seq_file *m, void *data)
683 {
684 	struct drm_info_node *node = m->private;
685 	struct drm_device *dev = node->minor->dev;
686 	struct drm_i915_private *dev_priv = dev->dev_private;
687 	struct intel_engine_cs *ring;
688 	struct drm_i915_gem_request *req;
689 	int ret, any, i;
690 
691 	ret = mutex_lock_interruptible(&dev->struct_mutex);
692 	if (ret)
693 		return ret;
694 
695 	any = 0;
696 	for_each_ring(ring, dev_priv, i) {
697 		int count;
698 
699 		count = 0;
700 		list_for_each_entry(req, &ring->request_list, list)
701 			count++;
702 		if (count == 0)
703 			continue;
704 
705 		seq_printf(m, "%s requests: %d\n", ring->name, count);
706 		list_for_each_entry(req, &ring->request_list, list) {
707 			struct task_struct *task;
708 
709 			rcu_read_lock();
710 			task = NULL;
711 			if (req->pid)
712 				task = pid_task(req->pid, PIDTYPE_PID);
713 			seq_printf(m, "    %x @ %d: %s [%d]\n",
714 				   req->seqno,
715 				   (int) (jiffies - req->emitted_jiffies),
716 				   task ? task->comm : "<unknown>",
717 				   task ? task->pid : -1);
718 			rcu_read_unlock();
719 		}
720 
721 		any++;
722 	}
723 	mutex_unlock(&dev->struct_mutex);
724 
725 	if (any == 0)
726 		seq_puts(m, "No requests\n");
727 
728 	return 0;
729 }
730 
731 static void i915_ring_seqno_info(struct seq_file *m,
732 				 struct intel_engine_cs *ring)
733 {
734 	if (ring->get_seqno) {
735 		seq_printf(m, "Current sequence (%s): %x\n",
736 			   ring->name, ring->get_seqno(ring, false));
737 	}
738 }
739 
740 static int i915_gem_seqno_info(struct seq_file *m, void *data)
741 {
742 	struct drm_info_node *node = m->private;
743 	struct drm_device *dev = node->minor->dev;
744 	struct drm_i915_private *dev_priv = dev->dev_private;
745 	struct intel_engine_cs *ring;
746 	int ret, i;
747 
748 	ret = mutex_lock_interruptible(&dev->struct_mutex);
749 	if (ret)
750 		return ret;
751 	intel_runtime_pm_get(dev_priv);
752 
753 	for_each_ring(ring, dev_priv, i)
754 		i915_ring_seqno_info(m, ring);
755 
756 	intel_runtime_pm_put(dev_priv);
757 	mutex_unlock(&dev->struct_mutex);
758 
759 	return 0;
760 }
761 
762 
763 static int i915_interrupt_info(struct seq_file *m, void *data)
764 {
765 	struct drm_info_node *node = m->private;
766 	struct drm_device *dev = node->minor->dev;
767 	struct drm_i915_private *dev_priv = dev->dev_private;
768 	struct intel_engine_cs *ring;
769 	int ret, i, pipe;
770 
771 	ret = mutex_lock_interruptible(&dev->struct_mutex);
772 	if (ret)
773 		return ret;
774 	intel_runtime_pm_get(dev_priv);
775 
776 	if (IS_CHERRYVIEW(dev)) {
777 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
778 			   I915_READ(GEN8_MASTER_IRQ));
779 
780 		seq_printf(m, "Display IER:\t%08x\n",
781 			   I915_READ(VLV_IER));
782 		seq_printf(m, "Display IIR:\t%08x\n",
783 			   I915_READ(VLV_IIR));
784 		seq_printf(m, "Display IIR_RW:\t%08x\n",
785 			   I915_READ(VLV_IIR_RW));
786 		seq_printf(m, "Display IMR:\t%08x\n",
787 			   I915_READ(VLV_IMR));
788 		for_each_pipe(dev_priv, pipe)
789 			seq_printf(m, "Pipe %c stat:\t%08x\n",
790 				   pipe_name(pipe),
791 				   I915_READ(PIPESTAT(pipe)));
792 
793 		seq_printf(m, "Port hotplug:\t%08x\n",
794 			   I915_READ(PORT_HOTPLUG_EN));
795 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
796 			   I915_READ(VLV_DPFLIPSTAT));
797 		seq_printf(m, "DPINVGTT:\t%08x\n",
798 			   I915_READ(DPINVGTT));
799 
800 		for (i = 0; i < 4; i++) {
801 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
802 				   i, I915_READ(GEN8_GT_IMR(i)));
803 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
804 				   i, I915_READ(GEN8_GT_IIR(i)));
805 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
806 				   i, I915_READ(GEN8_GT_IER(i)));
807 		}
808 
809 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
810 			   I915_READ(GEN8_PCU_IMR));
811 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
812 			   I915_READ(GEN8_PCU_IIR));
813 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
814 			   I915_READ(GEN8_PCU_IER));
815 	} else if (INTEL_INFO(dev)->gen >= 8) {
816 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
817 			   I915_READ(GEN8_MASTER_IRQ));
818 
819 		for (i = 0; i < 4; i++) {
820 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
821 				   i, I915_READ(GEN8_GT_IMR(i)));
822 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
823 				   i, I915_READ(GEN8_GT_IIR(i)));
824 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
825 				   i, I915_READ(GEN8_GT_IER(i)));
826 		}
827 
828 		for_each_pipe(dev_priv, pipe) {
829 			if (!intel_display_power_is_enabled(dev_priv,
830 						POWER_DOMAIN_PIPE(pipe))) {
831 				seq_printf(m, "Pipe %c power disabled\n",
832 					   pipe_name(pipe));
833 				continue;
834 			}
835 			seq_printf(m, "Pipe %c IMR:\t%08x\n",
836 				   pipe_name(pipe),
837 				   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
838 			seq_printf(m, "Pipe %c IIR:\t%08x\n",
839 				   pipe_name(pipe),
840 				   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
841 			seq_printf(m, "Pipe %c IER:\t%08x\n",
842 				   pipe_name(pipe),
843 				   I915_READ(GEN8_DE_PIPE_IER(pipe)));
844 		}
845 
846 		seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
847 			   I915_READ(GEN8_DE_PORT_IMR));
848 		seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
849 			   I915_READ(GEN8_DE_PORT_IIR));
850 		seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
851 			   I915_READ(GEN8_DE_PORT_IER));
852 
853 		seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
854 			   I915_READ(GEN8_DE_MISC_IMR));
855 		seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
856 			   I915_READ(GEN8_DE_MISC_IIR));
857 		seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
858 			   I915_READ(GEN8_DE_MISC_IER));
859 
860 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
861 			   I915_READ(GEN8_PCU_IMR));
862 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
863 			   I915_READ(GEN8_PCU_IIR));
864 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
865 			   I915_READ(GEN8_PCU_IER));
866 	} else if (IS_VALLEYVIEW(dev)) {
867 		seq_printf(m, "Display IER:\t%08x\n",
868 			   I915_READ(VLV_IER));
869 		seq_printf(m, "Display IIR:\t%08x\n",
870 			   I915_READ(VLV_IIR));
871 		seq_printf(m, "Display IIR_RW:\t%08x\n",
872 			   I915_READ(VLV_IIR_RW));
873 		seq_printf(m, "Display IMR:\t%08x\n",
874 			   I915_READ(VLV_IMR));
875 		for_each_pipe(dev_priv, pipe)
876 			seq_printf(m, "Pipe %c stat:\t%08x\n",
877 				   pipe_name(pipe),
878 				   I915_READ(PIPESTAT(pipe)));
879 
880 		seq_printf(m, "Master IER:\t%08x\n",
881 			   I915_READ(VLV_MASTER_IER));
882 
883 		seq_printf(m, "Render IER:\t%08x\n",
884 			   I915_READ(GTIER));
885 		seq_printf(m, "Render IIR:\t%08x\n",
886 			   I915_READ(GTIIR));
887 		seq_printf(m, "Render IMR:\t%08x\n",
888 			   I915_READ(GTIMR));
889 
890 		seq_printf(m, "PM IER:\t\t%08x\n",
891 			   I915_READ(GEN6_PMIER));
892 		seq_printf(m, "PM IIR:\t\t%08x\n",
893 			   I915_READ(GEN6_PMIIR));
894 		seq_printf(m, "PM IMR:\t\t%08x\n",
895 			   I915_READ(GEN6_PMIMR));
896 
897 		seq_printf(m, "Port hotplug:\t%08x\n",
898 			   I915_READ(PORT_HOTPLUG_EN));
899 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
900 			   I915_READ(VLV_DPFLIPSTAT));
901 		seq_printf(m, "DPINVGTT:\t%08x\n",
902 			   I915_READ(DPINVGTT));
903 
904 	} else if (!HAS_PCH_SPLIT(dev)) {
905 		seq_printf(m, "Interrupt enable:    %08x\n",
906 			   I915_READ(IER));
907 		seq_printf(m, "Interrupt identity:  %08x\n",
908 			   I915_READ(IIR));
909 		seq_printf(m, "Interrupt mask:      %08x\n",
910 			   I915_READ(IMR));
911 		for_each_pipe(dev_priv, pipe)
912 			seq_printf(m, "Pipe %c stat:         %08x\n",
913 				   pipe_name(pipe),
914 				   I915_READ(PIPESTAT(pipe)));
915 	} else {
916 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
917 			   I915_READ(DEIER));
918 		seq_printf(m, "North Display Interrupt identity:	%08x\n",
919 			   I915_READ(DEIIR));
920 		seq_printf(m, "North Display Interrupt mask:		%08x\n",
921 			   I915_READ(DEIMR));
922 		seq_printf(m, "South Display Interrupt enable:		%08x\n",
923 			   I915_READ(SDEIER));
924 		seq_printf(m, "South Display Interrupt identity:	%08x\n",
925 			   I915_READ(SDEIIR));
926 		seq_printf(m, "South Display Interrupt mask:		%08x\n",
927 			   I915_READ(SDEIMR));
928 		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
929 			   I915_READ(GTIER));
930 		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
931 			   I915_READ(GTIIR));
932 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
933 			   I915_READ(GTIMR));
934 	}
935 	for_each_ring(ring, dev_priv, i) {
936 		if (INTEL_INFO(dev)->gen >= 6) {
937 			seq_printf(m,
938 				   "Graphics Interrupt mask (%s):	%08x\n",
939 				   ring->name, I915_READ_IMR(ring));
940 		}
941 		i915_ring_seqno_info(m, ring);
942 	}
943 	intel_runtime_pm_put(dev_priv);
944 	mutex_unlock(&dev->struct_mutex);
945 
946 	return 0;
947 }
948 
949 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
950 {
951 	struct drm_info_node *node = m->private;
952 	struct drm_device *dev = node->minor->dev;
953 	struct drm_i915_private *dev_priv = dev->dev_private;
954 	int i, ret;
955 
956 	ret = mutex_lock_interruptible(&dev->struct_mutex);
957 	if (ret)
958 		return ret;
959 
960 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
961 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
962 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
963 		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
964 
965 		seq_printf(m, "Fence %d, pin count = %d, object = ",
966 			   i, dev_priv->fence_regs[i].pin_count);
967 		if (obj == NULL)
968 			seq_puts(m, "unused");
969 		else
970 			describe_obj(m, obj);
971 		seq_putc(m, '\n');
972 	}
973 
974 	mutex_unlock(&dev->struct_mutex);
975 	return 0;
976 }
977 
978 static int i915_hws_info(struct seq_file *m, void *data)
979 {
980 	struct drm_info_node *node = m->private;
981 	struct drm_device *dev = node->minor->dev;
982 	struct drm_i915_private *dev_priv = dev->dev_private;
983 	struct intel_engine_cs *ring;
984 	const u32 *hws;
985 	int i;
986 
987 	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
988 	hws = ring->status_page.page_addr;
989 	if (hws == NULL)
990 		return 0;
991 
992 	for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
993 		seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
994 			   i * 4,
995 			   hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
996 	}
997 	return 0;
998 }
999 
1000 static ssize_t
1001 i915_error_state_write(struct file *filp,
1002 		       const char __user *ubuf,
1003 		       size_t cnt,
1004 		       loff_t *ppos)
1005 {
1006 	struct i915_error_state_file_priv *error_priv = filp->private_data;
1007 	struct drm_device *dev = error_priv->dev;
1008 	int ret;
1009 
1010 	DRM_DEBUG_DRIVER("Resetting error state\n");
1011 
1012 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1013 	if (ret)
1014 		return ret;
1015 
1016 	i915_destroy_error_state(dev);
1017 	mutex_unlock(&dev->struct_mutex);
1018 
1019 	return cnt;
1020 }
1021 
1022 static int i915_error_state_open(struct inode *inode, struct file *file)
1023 {
1024 	struct drm_device *dev = inode->i_private;
1025 	struct i915_error_state_file_priv *error_priv;
1026 
1027 	error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
1028 	if (!error_priv)
1029 		return -ENOMEM;
1030 
1031 	error_priv->dev = dev;
1032 
1033 	i915_error_state_get(dev, error_priv);
1034 
1035 	file->private_data = error_priv;
1036 
1037 	return 0;
1038 }
1039 
1040 static int i915_error_state_release(struct inode *inode, struct file *file)
1041 {
1042 	struct i915_error_state_file_priv *error_priv = file->private_data;
1043 
1044 	i915_error_state_put(error_priv);
1045 	kfree(error_priv);
1046 
1047 	return 0;
1048 }
1049 
1050 static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
1051 				     size_t count, loff_t *pos)
1052 {
1053 	struct i915_error_state_file_priv *error_priv = file->private_data;
1054 	struct drm_i915_error_state_buf error_str;
1055 	loff_t tmp_pos = 0;
1056 	ssize_t ret_count = 0;
1057 	int ret;
1058 
1059 	ret = i915_error_state_buf_init(&error_str, to_i915(error_priv->dev), count, *pos);
1060 	if (ret)
1061 		return ret;
1062 
1063 	ret = i915_error_state_to_str(&error_str, error_priv);
1064 	if (ret)
1065 		goto out;
1066 
1067 	ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
1068 					    error_str.buf,
1069 					    error_str.bytes);
1070 
1071 	if (ret_count < 0)
1072 		ret = ret_count;
1073 	else
1074 		*pos = error_str.start + ret_count;
1075 out:
1076 	i915_error_state_buf_release(&error_str);
1077 	return ret ?: ret_count;
1078 }
1079 
1080 static const struct file_operations i915_error_state_fops = {
1081 	.owner = THIS_MODULE,
1082 	.open = i915_error_state_open,
1083 	.read = i915_error_state_read,
1084 	.write = i915_error_state_write,
1085 	.llseek = default_llseek,
1086 	.release = i915_error_state_release,
1087 };
1088 
1089 static int
1090 i915_next_seqno_get(void *data, u64 *val)
1091 {
1092 	struct drm_device *dev = data;
1093 	struct drm_i915_private *dev_priv = dev->dev_private;
1094 	int ret;
1095 
1096 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1097 	if (ret)
1098 		return ret;
1099 
1100 	*val = dev_priv->next_seqno;
1101 	mutex_unlock(&dev->struct_mutex);
1102 
1103 	return 0;
1104 }
1105 
1106 static int
1107 i915_next_seqno_set(void *data, u64 val)
1108 {
1109 	struct drm_device *dev = data;
1110 	int ret;
1111 
1112 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1113 	if (ret)
1114 		return ret;
1115 
1116 	ret = i915_gem_set_seqno(dev, val);
1117 	mutex_unlock(&dev->struct_mutex);
1118 
1119 	return ret;
1120 }
1121 
1122 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1123 			i915_next_seqno_get, i915_next_seqno_set,
1124 			"0x%llx\n");
1125 
1126 static int i915_frequency_info(struct seq_file *m, void *unused)
1127 {
1128 	struct drm_info_node *node = m->private;
1129 	struct drm_device *dev = node->minor->dev;
1130 	struct drm_i915_private *dev_priv = dev->dev_private;
1131 	int ret = 0;
1132 
1133 	intel_runtime_pm_get(dev_priv);
1134 
1135 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1136 
1137 	if (IS_GEN5(dev)) {
1138 		u16 rgvswctl = I915_READ16(MEMSWCTL);
1139 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1140 
1141 		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1142 		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1143 		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1144 			   MEMSTAT_VID_SHIFT);
1145 		seq_printf(m, "Current P-state: %d\n",
1146 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1147 	} else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
1148 		   IS_BROADWELL(dev) || IS_GEN9(dev)) {
1149 		u32 rp_state_limits;
1150 		u32 gt_perf_status;
1151 		u32 rp_state_cap;
1152 		u32 rpmodectl, rpinclimit, rpdeclimit;
1153 		u32 rpstat, cagf, reqf;
1154 		u32 rpupei, rpcurup, rpprevup;
1155 		u32 rpdownei, rpcurdown, rpprevdown;
1156 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1157 		int max_freq;
1158 
1159 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1160 		if (IS_BROXTON(dev)) {
1161 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1162 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1163 		} else {
1164 			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1165 			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1166 		}
1167 
1168 		/* RPSTAT1 is in the GT power well */
1169 		ret = mutex_lock_interruptible(&dev->struct_mutex);
1170 		if (ret)
1171 			goto out;
1172 
1173 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1174 
1175 		reqf = I915_READ(GEN6_RPNSWREQ);
1176 		if (IS_GEN9(dev))
1177 			reqf >>= 23;
1178 		else {
1179 			reqf &= ~GEN6_TURBO_DISABLE;
1180 			if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1181 				reqf >>= 24;
1182 			else
1183 				reqf >>= 25;
1184 		}
1185 		reqf = intel_gpu_freq(dev_priv, reqf);
1186 
1187 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1188 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1189 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1190 
1191 		rpstat = I915_READ(GEN6_RPSTAT1);
1192 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
1193 		rpcurup = I915_READ(GEN6_RP_CUR_UP);
1194 		rpprevup = I915_READ(GEN6_RP_PREV_UP);
1195 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
1196 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
1197 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
1198 		if (IS_GEN9(dev))
1199 			cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
1200 		else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1201 			cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
1202 		else
1203 			cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
1204 		cagf = intel_gpu_freq(dev_priv, cagf);
1205 
1206 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1207 		mutex_unlock(&dev->struct_mutex);
1208 
1209 		if (IS_GEN6(dev) || IS_GEN7(dev)) {
1210 			pm_ier = I915_READ(GEN6_PMIER);
1211 			pm_imr = I915_READ(GEN6_PMIMR);
1212 			pm_isr = I915_READ(GEN6_PMISR);
1213 			pm_iir = I915_READ(GEN6_PMIIR);
1214 			pm_mask = I915_READ(GEN6_PMINTRMSK);
1215 		} else {
1216 			pm_ier = I915_READ(GEN8_GT_IER(2));
1217 			pm_imr = I915_READ(GEN8_GT_IMR(2));
1218 			pm_isr = I915_READ(GEN8_GT_ISR(2));
1219 			pm_iir = I915_READ(GEN8_GT_IIR(2));
1220 			pm_mask = I915_READ(GEN6_PMINTRMSK);
1221 		}
1222 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1223 			   pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1224 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1225 		seq_printf(m, "Render p-state ratio: %d\n",
1226 			   (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
1227 		seq_printf(m, "Render p-state VID: %d\n",
1228 			   gt_perf_status & 0xff);
1229 		seq_printf(m, "Render p-state limit: %d\n",
1230 			   rp_state_limits & 0xff);
1231 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1232 		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1233 		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1234 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1235 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1236 		seq_printf(m, "CAGF: %dMHz\n", cagf);
1237 		seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
1238 			   GEN6_CURICONT_MASK);
1239 		seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
1240 			   GEN6_CURBSYTAVG_MASK);
1241 		seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
1242 			   GEN6_CURBSYTAVG_MASK);
1243 		seq_printf(m, "Up threshold: %d%%\n",
1244 			   dev_priv->rps.up_threshold);
1245 
1246 		seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
1247 			   GEN6_CURIAVG_MASK);
1248 		seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
1249 			   GEN6_CURBSYTAVG_MASK);
1250 		seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
1251 			   GEN6_CURBSYTAVG_MASK);
1252 		seq_printf(m, "Down threshold: %d%%\n",
1253 			   dev_priv->rps.down_threshold);
1254 
1255 		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 0 :
1256 			    rp_state_cap >> 16) & 0xff;
1257 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
1258 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1259 			   intel_gpu_freq(dev_priv, max_freq));
1260 
1261 		max_freq = (rp_state_cap & 0xff00) >> 8;
1262 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
1263 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1264 			   intel_gpu_freq(dev_priv, max_freq));
1265 
1266 		max_freq = (IS_BROXTON(dev) ? rp_state_cap >> 16 :
1267 			    rp_state_cap >> 0) & 0xff;
1268 		max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
1269 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1270 			   intel_gpu_freq(dev_priv, max_freq));
1271 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1272 			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1273 
1274 		seq_printf(m, "Current freq: %d MHz\n",
1275 			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1276 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
1277 		seq_printf(m, "Idle freq: %d MHz\n",
1278 			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1279 		seq_printf(m, "Min freq: %d MHz\n",
1280 			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1281 		seq_printf(m, "Max freq: %d MHz\n",
1282 			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1283 		seq_printf(m,
1284 			   "efficient (RPe) frequency: %d MHz\n",
1285 			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1286 	} else if (IS_VALLEYVIEW(dev)) {
1287 		u32 freq_sts;
1288 
1289 		mutex_lock(&dev_priv->rps.hw_lock);
1290 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1291 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1292 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1293 
1294 		seq_printf(m, "actual GPU freq: %d MHz\n",
1295 			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1296 
1297 		seq_printf(m, "current GPU freq: %d MHz\n",
1298 			   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
1299 
1300 		seq_printf(m, "max GPU freq: %d MHz\n",
1301 			   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
1302 
1303 		seq_printf(m, "min GPU freq: %d MHz\n",
1304 			   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
1305 
1306 		seq_printf(m, "idle GPU freq: %d MHz\n",
1307 			   intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
1308 
1309 		seq_printf(m,
1310 			   "efficient (RPe) frequency: %d MHz\n",
1311 			   intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
1312 		mutex_unlock(&dev_priv->rps.hw_lock);
1313 	} else {
1314 		seq_puts(m, "no P-state info available\n");
1315 	}
1316 
1317 out:
1318 	intel_runtime_pm_put(dev_priv);
1319 	return ret;
1320 }
1321 
1322 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1323 {
1324 	struct drm_info_node *node = m->private;
1325 	struct drm_device *dev = node->minor->dev;
1326 	struct drm_i915_private *dev_priv = dev->dev_private;
1327 	struct intel_engine_cs *ring;
1328 	u64 acthd[I915_NUM_RINGS];
1329 	u32 seqno[I915_NUM_RINGS];
1330 	int i;
1331 
1332 	if (!i915.enable_hangcheck) {
1333 		seq_printf(m, "Hangcheck disabled\n");
1334 		return 0;
1335 	}
1336 
1337 	intel_runtime_pm_get(dev_priv);
1338 
1339 	for_each_ring(ring, dev_priv, i) {
1340 		seqno[i] = ring->get_seqno(ring, false);
1341 		acthd[i] = intel_ring_get_active_head(ring);
1342 	}
1343 
1344 	intel_runtime_pm_put(dev_priv);
1345 
1346 	if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
1347 		seq_printf(m, "Hangcheck active, fires in %dms\n",
1348 			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1349 					    jiffies));
1350 	} else
1351 		seq_printf(m, "Hangcheck inactive\n");
1352 
1353 	for_each_ring(ring, dev_priv, i) {
1354 		seq_printf(m, "%s:\n", ring->name);
1355 		seq_printf(m, "\tseqno = %x [current %x]\n",
1356 			   ring->hangcheck.seqno, seqno[i]);
1357 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1358 			   (long long)ring->hangcheck.acthd,
1359 			   (long long)acthd[i]);
1360 		seq_printf(m, "\tmax ACTHD = 0x%08llx\n",
1361 			   (long long)ring->hangcheck.max_acthd);
1362 		seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
1363 		seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
1364 	}
1365 
1366 	return 0;
1367 }
1368 
1369 static int ironlake_drpc_info(struct seq_file *m)
1370 {
1371 	struct drm_info_node *node = m->private;
1372 	struct drm_device *dev = node->minor->dev;
1373 	struct drm_i915_private *dev_priv = dev->dev_private;
1374 	u32 rgvmodectl, rstdbyctl;
1375 	u16 crstandvid;
1376 	int ret;
1377 
1378 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1379 	if (ret)
1380 		return ret;
1381 	intel_runtime_pm_get(dev_priv);
1382 
1383 	rgvmodectl = I915_READ(MEMMODECTL);
1384 	rstdbyctl = I915_READ(RSTDBYCTL);
1385 	crstandvid = I915_READ16(CRSTANDVID);
1386 
1387 	intel_runtime_pm_put(dev_priv);
1388 	mutex_unlock(&dev->struct_mutex);
1389 
1390 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
1391 		   "yes" : "no");
1392 	seq_printf(m, "Boost freq: %d\n",
1393 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1394 		   MEMMODE_BOOST_FREQ_SHIFT);
1395 	seq_printf(m, "HW control enabled: %s\n",
1396 		   rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
1397 	seq_printf(m, "SW control enabled: %s\n",
1398 		   rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
1399 	seq_printf(m, "Gated voltage change: %s\n",
1400 		   rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
1401 	seq_printf(m, "Starting frequency: P%d\n",
1402 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1403 	seq_printf(m, "Max P-state: P%d\n",
1404 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1405 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1406 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1407 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1408 	seq_printf(m, "Render standby enabled: %s\n",
1409 		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
1410 	seq_puts(m, "Current RS state: ");
1411 	switch (rstdbyctl & RSX_STATUS_MASK) {
1412 	case RSX_STATUS_ON:
1413 		seq_puts(m, "on\n");
1414 		break;
1415 	case RSX_STATUS_RC1:
1416 		seq_puts(m, "RC1\n");
1417 		break;
1418 	case RSX_STATUS_RC1E:
1419 		seq_puts(m, "RC1E\n");
1420 		break;
1421 	case RSX_STATUS_RS1:
1422 		seq_puts(m, "RS1\n");
1423 		break;
1424 	case RSX_STATUS_RS2:
1425 		seq_puts(m, "RS2 (RC6)\n");
1426 		break;
1427 	case RSX_STATUS_RS3:
1428 		seq_puts(m, "RC3 (RC6+)\n");
1429 		break;
1430 	default:
1431 		seq_puts(m, "unknown\n");
1432 		break;
1433 	}
1434 
1435 	return 0;
1436 }
1437 
1438 static int i915_forcewake_domains(struct seq_file *m, void *data)
1439 {
1440 	struct drm_info_node *node = m->private;
1441 	struct drm_device *dev = node->minor->dev;
1442 	struct drm_i915_private *dev_priv = dev->dev_private;
1443 	struct intel_uncore_forcewake_domain *fw_domain;
1444 	int i;
1445 
1446 	spin_lock_irq(&dev_priv->uncore.lock);
1447 	for_each_fw_domain(fw_domain, dev_priv, i) {
1448 		seq_printf(m, "%s.wake_count = %u\n",
1449 			   intel_uncore_forcewake_domain_to_str(i),
1450 			   fw_domain->wake_count);
1451 	}
1452 	spin_unlock_irq(&dev_priv->uncore.lock);
1453 
1454 	return 0;
1455 }
1456 
1457 static int vlv_drpc_info(struct seq_file *m)
1458 {
1459 	struct drm_info_node *node = m->private;
1460 	struct drm_device *dev = node->minor->dev;
1461 	struct drm_i915_private *dev_priv = dev->dev_private;
1462 	u32 rpmodectl1, rcctl1, pw_status;
1463 
1464 	intel_runtime_pm_get(dev_priv);
1465 
1466 	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1467 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1468 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1469 
1470 	intel_runtime_pm_put(dev_priv);
1471 
1472 	seq_printf(m, "Video Turbo Mode: %s\n",
1473 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1474 	seq_printf(m, "Turbo enabled: %s\n",
1475 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1476 	seq_printf(m, "HW control enabled: %s\n",
1477 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1478 	seq_printf(m, "SW control enabled: %s\n",
1479 		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1480 			  GEN6_RP_MEDIA_SW_MODE));
1481 	seq_printf(m, "RC6 Enabled: %s\n",
1482 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1483 					GEN6_RC_CTL_EI_MODE(1))));
1484 	seq_printf(m, "Render Power Well: %s\n",
1485 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1486 	seq_printf(m, "Media Power Well: %s\n",
1487 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1488 
1489 	seq_printf(m, "Render RC6 residency since boot: %u\n",
1490 		   I915_READ(VLV_GT_RENDER_RC6));
1491 	seq_printf(m, "Media RC6 residency since boot: %u\n",
1492 		   I915_READ(VLV_GT_MEDIA_RC6));
1493 
1494 	return i915_forcewake_domains(m, NULL);
1495 }
1496 
1497 static int gen6_drpc_info(struct seq_file *m)
1498 {
1499 	struct drm_info_node *node = m->private;
1500 	struct drm_device *dev = node->minor->dev;
1501 	struct drm_i915_private *dev_priv = dev->dev_private;
1502 	u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1503 	unsigned forcewake_count;
1504 	int count = 0, ret;
1505 
1506 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1507 	if (ret)
1508 		return ret;
1509 	intel_runtime_pm_get(dev_priv);
1510 
1511 	spin_lock_irq(&dev_priv->uncore.lock);
1512 	forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
1513 	spin_unlock_irq(&dev_priv->uncore.lock);
1514 
1515 	if (forcewake_count) {
1516 		seq_puts(m, "RC information inaccurate because somebody "
1517 			    "holds a forcewake reference \n");
1518 	} else {
1519 		/* NB: we cannot use forcewake, else we read the wrong values */
1520 		while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
1521 			udelay(10);
1522 		seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
1523 	}
1524 
1525 	gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
1526 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1527 
1528 	rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1529 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1530 	mutex_unlock(&dev->struct_mutex);
1531 	mutex_lock(&dev_priv->rps.hw_lock);
1532 	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1533 	mutex_unlock(&dev_priv->rps.hw_lock);
1534 
1535 	intel_runtime_pm_put(dev_priv);
1536 
1537 	seq_printf(m, "Video Turbo Mode: %s\n",
1538 		   yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
1539 	seq_printf(m, "HW control enabled: %s\n",
1540 		   yesno(rpmodectl1 & GEN6_RP_ENABLE));
1541 	seq_printf(m, "SW control enabled: %s\n",
1542 		   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
1543 			  GEN6_RP_MEDIA_SW_MODE));
1544 	seq_printf(m, "RC1e Enabled: %s\n",
1545 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1546 	seq_printf(m, "RC6 Enabled: %s\n",
1547 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1548 	seq_printf(m, "Deep RC6 Enabled: %s\n",
1549 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1550 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1551 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1552 	seq_puts(m, "Current RC state: ");
1553 	switch (gt_core_status & GEN6_RCn_MASK) {
1554 	case GEN6_RC0:
1555 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1556 			seq_puts(m, "Core Power Down\n");
1557 		else
1558 			seq_puts(m, "on\n");
1559 		break;
1560 	case GEN6_RC3:
1561 		seq_puts(m, "RC3\n");
1562 		break;
1563 	case GEN6_RC6:
1564 		seq_puts(m, "RC6\n");
1565 		break;
1566 	case GEN6_RC7:
1567 		seq_puts(m, "RC7\n");
1568 		break;
1569 	default:
1570 		seq_puts(m, "Unknown\n");
1571 		break;
1572 	}
1573 
1574 	seq_printf(m, "Core Power Down: %s\n",
1575 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1576 
1577 	/* Not exactly sure what this is */
1578 	seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1579 		   I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1580 	seq_printf(m, "RC6 residency since boot: %u\n",
1581 		   I915_READ(GEN6_GT_GFX_RC6));
1582 	seq_printf(m, "RC6+ residency since boot: %u\n",
1583 		   I915_READ(GEN6_GT_GFX_RC6p));
1584 	seq_printf(m, "RC6++ residency since boot: %u\n",
1585 		   I915_READ(GEN6_GT_GFX_RC6pp));
1586 
1587 	seq_printf(m, "RC6   voltage: %dmV\n",
1588 		   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1589 	seq_printf(m, "RC6+  voltage: %dmV\n",
1590 		   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1591 	seq_printf(m, "RC6++ voltage: %dmV\n",
1592 		   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1593 	return 0;
1594 }
1595 
1596 static int i915_drpc_info(struct seq_file *m, void *unused)
1597 {
1598 	struct drm_info_node *node = m->private;
1599 	struct drm_device *dev = node->minor->dev;
1600 
1601 	if (IS_VALLEYVIEW(dev))
1602 		return vlv_drpc_info(m);
1603 	else if (INTEL_INFO(dev)->gen >= 6)
1604 		return gen6_drpc_info(m);
1605 	else
1606 		return ironlake_drpc_info(m);
1607 }
1608 
1609 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1610 {
1611 	struct drm_info_node *node = m->private;
1612 	struct drm_device *dev = node->minor->dev;
1613 	struct drm_i915_private *dev_priv = dev->dev_private;
1614 
1615 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1616 		   dev_priv->fb_tracking.busy_bits);
1617 
1618 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1619 		   dev_priv->fb_tracking.flip_bits);
1620 
1621 	return 0;
1622 }
1623 
1624 static int i915_fbc_status(struct seq_file *m, void *unused)
1625 {
1626 	struct drm_info_node *node = m->private;
1627 	struct drm_device *dev = node->minor->dev;
1628 	struct drm_i915_private *dev_priv = dev->dev_private;
1629 
1630 	if (!HAS_FBC(dev)) {
1631 		seq_puts(m, "FBC unsupported on this chipset\n");
1632 		return 0;
1633 	}
1634 
1635 	intel_runtime_pm_get(dev_priv);
1636 	mutex_lock(&dev_priv->fbc.lock);
1637 
1638 	if (intel_fbc_enabled(dev_priv))
1639 		seq_puts(m, "FBC enabled\n");
1640 	else
1641 		seq_printf(m, "FBC disabled: %s\n",
1642 			  intel_no_fbc_reason_str(dev_priv->fbc.no_fbc_reason));
1643 
1644 	if (INTEL_INFO(dev_priv)->gen >= 7)
1645 		seq_printf(m, "Compressing: %s\n",
1646 			   yesno(I915_READ(FBC_STATUS2) &
1647 				 FBC_COMPRESSION_MASK));
1648 
1649 	mutex_unlock(&dev_priv->fbc.lock);
1650 	intel_runtime_pm_put(dev_priv);
1651 
1652 	return 0;
1653 }
1654 
1655 static int i915_fbc_fc_get(void *data, u64 *val)
1656 {
1657 	struct drm_device *dev = data;
1658 	struct drm_i915_private *dev_priv = dev->dev_private;
1659 
1660 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1661 		return -ENODEV;
1662 
1663 	*val = dev_priv->fbc.false_color;
1664 
1665 	return 0;
1666 }
1667 
1668 static int i915_fbc_fc_set(void *data, u64 val)
1669 {
1670 	struct drm_device *dev = data;
1671 	struct drm_i915_private *dev_priv = dev->dev_private;
1672 	u32 reg;
1673 
1674 	if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1675 		return -ENODEV;
1676 
1677 	mutex_lock(&dev_priv->fbc.lock);
1678 
1679 	reg = I915_READ(ILK_DPFC_CONTROL);
1680 	dev_priv->fbc.false_color = val;
1681 
1682 	I915_WRITE(ILK_DPFC_CONTROL, val ?
1683 		   (reg | FBC_CTL_FALSE_COLOR) :
1684 		   (reg & ~FBC_CTL_FALSE_COLOR));
1685 
1686 	mutex_unlock(&dev_priv->fbc.lock);
1687 	return 0;
1688 }
1689 
1690 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_fc_fops,
1691 			i915_fbc_fc_get, i915_fbc_fc_set,
1692 			"%llu\n");
1693 
1694 static int i915_ips_status(struct seq_file *m, void *unused)
1695 {
1696 	struct drm_info_node *node = m->private;
1697 	struct drm_device *dev = node->minor->dev;
1698 	struct drm_i915_private *dev_priv = dev->dev_private;
1699 
1700 	if (!HAS_IPS(dev)) {
1701 		seq_puts(m, "not supported\n");
1702 		return 0;
1703 	}
1704 
1705 	intel_runtime_pm_get(dev_priv);
1706 
1707 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1708 		   yesno(i915.enable_ips));
1709 
1710 	if (INTEL_INFO(dev)->gen >= 8) {
1711 		seq_puts(m, "Currently: unknown\n");
1712 	} else {
1713 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1714 			seq_puts(m, "Currently: enabled\n");
1715 		else
1716 			seq_puts(m, "Currently: disabled\n");
1717 	}
1718 
1719 	intel_runtime_pm_put(dev_priv);
1720 
1721 	return 0;
1722 }
1723 
1724 static int i915_sr_status(struct seq_file *m, void *unused)
1725 {
1726 	struct drm_info_node *node = m->private;
1727 	struct drm_device *dev = node->minor->dev;
1728 	struct drm_i915_private *dev_priv = dev->dev_private;
1729 	bool sr_enabled = false;
1730 
1731 	intel_runtime_pm_get(dev_priv);
1732 
1733 	if (HAS_PCH_SPLIT(dev))
1734 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1735 	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
1736 		 IS_I945G(dev) || IS_I945GM(dev))
1737 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1738 	else if (IS_I915GM(dev))
1739 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1740 	else if (IS_PINEVIEW(dev))
1741 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1742 	else if (IS_VALLEYVIEW(dev))
1743 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1744 
1745 	intel_runtime_pm_put(dev_priv);
1746 
1747 	seq_printf(m, "self-refresh: %s\n",
1748 		   sr_enabled ? "enabled" : "disabled");
1749 
1750 	return 0;
1751 }
1752 
1753 static int i915_emon_status(struct seq_file *m, void *unused)
1754 {
1755 	struct drm_info_node *node = m->private;
1756 	struct drm_device *dev = node->minor->dev;
1757 	struct drm_i915_private *dev_priv = dev->dev_private;
1758 	unsigned long temp, chipset, gfx;
1759 	int ret;
1760 
1761 	if (!IS_GEN5(dev))
1762 		return -ENODEV;
1763 
1764 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1765 	if (ret)
1766 		return ret;
1767 
1768 	temp = i915_mch_val(dev_priv);
1769 	chipset = i915_chipset_val(dev_priv);
1770 	gfx = i915_gfx_val(dev_priv);
1771 	mutex_unlock(&dev->struct_mutex);
1772 
1773 	seq_printf(m, "GMCH temp: %ld\n", temp);
1774 	seq_printf(m, "Chipset power: %ld\n", chipset);
1775 	seq_printf(m, "GFX power: %ld\n", gfx);
1776 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1777 
1778 	return 0;
1779 }
1780 
1781 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1782 {
1783 	struct drm_info_node *node = m->private;
1784 	struct drm_device *dev = node->minor->dev;
1785 	struct drm_i915_private *dev_priv = dev->dev_private;
1786 	int ret = 0;
1787 	int gpu_freq, ia_freq;
1788 	unsigned int max_gpu_freq, min_gpu_freq;
1789 
1790 	if (!HAS_CORE_RING_FREQ(dev)) {
1791 		seq_puts(m, "unsupported on this chipset\n");
1792 		return 0;
1793 	}
1794 
1795 	intel_runtime_pm_get(dev_priv);
1796 
1797 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
1798 
1799 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1800 	if (ret)
1801 		goto out;
1802 
1803 	if (IS_SKYLAKE(dev)) {
1804 		/* Convert GT frequency to 50 HZ units */
1805 		min_gpu_freq =
1806 			dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
1807 		max_gpu_freq =
1808 			dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER;
1809 	} else {
1810 		min_gpu_freq = dev_priv->rps.min_freq_softlimit;
1811 		max_gpu_freq = dev_priv->rps.max_freq_softlimit;
1812 	}
1813 
1814 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1815 
1816 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1817 		ia_freq = gpu_freq;
1818 		sandybridge_pcode_read(dev_priv,
1819 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1820 				       &ia_freq);
1821 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1822 			   intel_gpu_freq(dev_priv, (gpu_freq *
1823 				(IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1))),
1824 			   ((ia_freq >> 0) & 0xff) * 100,
1825 			   ((ia_freq >> 8) & 0xff) * 100);
1826 	}
1827 
1828 	mutex_unlock(&dev_priv->rps.hw_lock);
1829 
1830 out:
1831 	intel_runtime_pm_put(dev_priv);
1832 	return ret;
1833 }
1834 
1835 static int i915_opregion(struct seq_file *m, void *unused)
1836 {
1837 	struct drm_info_node *node = m->private;
1838 	struct drm_device *dev = node->minor->dev;
1839 	struct drm_i915_private *dev_priv = dev->dev_private;
1840 	struct intel_opregion *opregion = &dev_priv->opregion;
1841 	void *data = kmalloc(OPREGION_SIZE, GFP_KERNEL);
1842 	int ret;
1843 
1844 	if (data == NULL)
1845 		return -ENOMEM;
1846 
1847 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1848 	if (ret)
1849 		goto out;
1850 
1851 	if (opregion->header) {
1852 		memcpy_fromio(data, opregion->header, OPREGION_SIZE);
1853 		seq_write(m, data, OPREGION_SIZE);
1854 	}
1855 
1856 	mutex_unlock(&dev->struct_mutex);
1857 
1858 out:
1859 	kfree(data);
1860 	return 0;
1861 }
1862 
1863 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1864 {
1865 	struct drm_info_node *node = m->private;
1866 	struct drm_device *dev = node->minor->dev;
1867 	struct intel_fbdev *ifbdev = NULL;
1868 	struct intel_framebuffer *fb;
1869 	struct drm_framebuffer *drm_fb;
1870 
1871 #ifdef CONFIG_DRM_FBDEV_EMULATION
1872 	struct drm_i915_private *dev_priv = dev->dev_private;
1873 
1874 	ifbdev = dev_priv->fbdev;
1875 	fb = to_intel_framebuffer(ifbdev->helper.fb);
1876 
1877 	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1878 		   fb->base.width,
1879 		   fb->base.height,
1880 		   fb->base.depth,
1881 		   fb->base.bits_per_pixel,
1882 		   fb->base.modifier[0],
1883 		   atomic_read(&fb->base.refcount.refcount));
1884 	describe_obj(m, fb->obj);
1885 	seq_putc(m, '\n');
1886 #endif
1887 
1888 	mutex_lock(&dev->mode_config.fb_lock);
1889 	drm_for_each_fb(drm_fb, dev) {
1890 		fb = to_intel_framebuffer(drm_fb);
1891 		if (ifbdev && &fb->base == ifbdev->helper.fb)
1892 			continue;
1893 
1894 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1895 			   fb->base.width,
1896 			   fb->base.height,
1897 			   fb->base.depth,
1898 			   fb->base.bits_per_pixel,
1899 			   fb->base.modifier[0],
1900 			   atomic_read(&fb->base.refcount.refcount));
1901 		describe_obj(m, fb->obj);
1902 		seq_putc(m, '\n');
1903 	}
1904 	mutex_unlock(&dev->mode_config.fb_lock);
1905 
1906 	return 0;
1907 }
1908 
1909 static void describe_ctx_ringbuf(struct seq_file *m,
1910 				 struct intel_ringbuffer *ringbuf)
1911 {
1912 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
1913 		   ringbuf->space, ringbuf->head, ringbuf->tail,
1914 		   ringbuf->last_retired_head);
1915 }
1916 
1917 static int i915_context_status(struct seq_file *m, void *unused)
1918 {
1919 	struct drm_info_node *node = m->private;
1920 	struct drm_device *dev = node->minor->dev;
1921 	struct drm_i915_private *dev_priv = dev->dev_private;
1922 	struct intel_engine_cs *ring;
1923 	struct intel_context *ctx;
1924 	int ret, i;
1925 
1926 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1927 	if (ret)
1928 		return ret;
1929 
1930 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
1931 		if (!i915.enable_execlists &&
1932 		    ctx->legacy_hw_ctx.rcs_state == NULL)
1933 			continue;
1934 
1935 		seq_puts(m, "HW context ");
1936 		describe_ctx(m, ctx);
1937 		for_each_ring(ring, dev_priv, i) {
1938 			if (ring->default_context == ctx)
1939 				seq_printf(m, "(default context %s) ",
1940 					   ring->name);
1941 		}
1942 
1943 		if (i915.enable_execlists) {
1944 			seq_putc(m, '\n');
1945 			for_each_ring(ring, dev_priv, i) {
1946 				struct drm_i915_gem_object *ctx_obj =
1947 					ctx->engine[i].state;
1948 				struct intel_ringbuffer *ringbuf =
1949 					ctx->engine[i].ringbuf;
1950 
1951 				seq_printf(m, "%s: ", ring->name);
1952 				if (ctx_obj)
1953 					describe_obj(m, ctx_obj);
1954 				if (ringbuf)
1955 					describe_ctx_ringbuf(m, ringbuf);
1956 				seq_putc(m, '\n');
1957 			}
1958 		} else {
1959 			describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
1960 		}
1961 
1962 		seq_putc(m, '\n');
1963 	}
1964 
1965 	mutex_unlock(&dev->struct_mutex);
1966 
1967 	return 0;
1968 }
1969 
1970 static void i915_dump_lrc_obj(struct seq_file *m,
1971 			      struct intel_engine_cs *ring,
1972 			      struct drm_i915_gem_object *ctx_obj)
1973 {
1974 	struct page *page;
1975 	uint32_t *reg_state;
1976 	int j;
1977 	unsigned long ggtt_offset = 0;
1978 
1979 	if (ctx_obj == NULL) {
1980 		seq_printf(m, "Context on %s with no gem object\n",
1981 			   ring->name);
1982 		return;
1983 	}
1984 
1985 	seq_printf(m, "CONTEXT: %s %u\n", ring->name,
1986 		   intel_execlists_ctx_id(ctx_obj));
1987 
1988 	if (!i915_gem_obj_ggtt_bound(ctx_obj))
1989 		seq_puts(m, "\tNot bound in GGTT\n");
1990 	else
1991 		ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
1992 
1993 	if (i915_gem_object_get_pages(ctx_obj)) {
1994 		seq_puts(m, "\tFailed to get pages for context object\n");
1995 		return;
1996 	}
1997 
1998 	page = i915_gem_object_get_page(ctx_obj, 1);
1999 	if (!WARN_ON(page == NULL)) {
2000 		reg_state = kmap_atomic(page);
2001 
2002 		for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
2003 			seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
2004 				   ggtt_offset + 4096 + (j * 4),
2005 				   reg_state[j], reg_state[j + 1],
2006 				   reg_state[j + 2], reg_state[j + 3]);
2007 		}
2008 		kunmap_atomic(reg_state);
2009 	}
2010 
2011 	seq_putc(m, '\n');
2012 }
2013 
2014 static int i915_dump_lrc(struct seq_file *m, void *unused)
2015 {
2016 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2017 	struct drm_device *dev = node->minor->dev;
2018 	struct drm_i915_private *dev_priv = dev->dev_private;
2019 	struct intel_engine_cs *ring;
2020 	struct intel_context *ctx;
2021 	int ret, i;
2022 
2023 	if (!i915.enable_execlists) {
2024 		seq_printf(m, "Logical Ring Contexts are disabled\n");
2025 		return 0;
2026 	}
2027 
2028 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2029 	if (ret)
2030 		return ret;
2031 
2032 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
2033 		for_each_ring(ring, dev_priv, i) {
2034 			if (ring->default_context != ctx)
2035 				i915_dump_lrc_obj(m, ring,
2036 						  ctx->engine[i].state);
2037 		}
2038 	}
2039 
2040 	mutex_unlock(&dev->struct_mutex);
2041 
2042 	return 0;
2043 }
2044 
2045 static int i915_execlists(struct seq_file *m, void *data)
2046 {
2047 	struct drm_info_node *node = (struct drm_info_node *)m->private;
2048 	struct drm_device *dev = node->minor->dev;
2049 	struct drm_i915_private *dev_priv = dev->dev_private;
2050 	struct intel_engine_cs *ring;
2051 	u32 status_pointer;
2052 	u8 read_pointer;
2053 	u8 write_pointer;
2054 	u32 status;
2055 	u32 ctx_id;
2056 	struct list_head *cursor;
2057 	int ring_id, i;
2058 	int ret;
2059 
2060 	if (!i915.enable_execlists) {
2061 		seq_puts(m, "Logical Ring Contexts are disabled\n");
2062 		return 0;
2063 	}
2064 
2065 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2066 	if (ret)
2067 		return ret;
2068 
2069 	intel_runtime_pm_get(dev_priv);
2070 
2071 	for_each_ring(ring, dev_priv, ring_id) {
2072 		struct drm_i915_gem_request *head_req = NULL;
2073 		int count = 0;
2074 		unsigned long flags;
2075 
2076 		seq_printf(m, "%s\n", ring->name);
2077 
2078 		status = I915_READ(RING_EXECLIST_STATUS(ring));
2079 		ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4);
2080 		seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
2081 			   status, ctx_id);
2082 
2083 		status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
2084 		seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
2085 
2086 		read_pointer = ring->next_context_status_buffer;
2087 		write_pointer = status_pointer & 0x07;
2088 		if (read_pointer > write_pointer)
2089 			write_pointer += 6;
2090 		seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
2091 			   read_pointer, write_pointer);
2092 
2093 		for (i = 0; i < 6; i++) {
2094 			status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i);
2095 			ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4);
2096 
2097 			seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
2098 				   i, status, ctx_id);
2099 		}
2100 
2101 		spin_lock_irqsave(&ring->execlist_lock, flags);
2102 		list_for_each(cursor, &ring->execlist_queue)
2103 			count++;
2104 		head_req = list_first_entry_or_null(&ring->execlist_queue,
2105 				struct drm_i915_gem_request, execlist_link);
2106 		spin_unlock_irqrestore(&ring->execlist_lock, flags);
2107 
2108 		seq_printf(m, "\t%d requests in queue\n", count);
2109 		if (head_req) {
2110 			struct drm_i915_gem_object *ctx_obj;
2111 
2112 			ctx_obj = head_req->ctx->engine[ring_id].state;
2113 			seq_printf(m, "\tHead request id: %u\n",
2114 				   intel_execlists_ctx_id(ctx_obj));
2115 			seq_printf(m, "\tHead request tail: %u\n",
2116 				   head_req->tail);
2117 		}
2118 
2119 		seq_putc(m, '\n');
2120 	}
2121 
2122 	intel_runtime_pm_put(dev_priv);
2123 	mutex_unlock(&dev->struct_mutex);
2124 
2125 	return 0;
2126 }
2127 
2128 static const char *swizzle_string(unsigned swizzle)
2129 {
2130 	switch (swizzle) {
2131 	case I915_BIT_6_SWIZZLE_NONE:
2132 		return "none";
2133 	case I915_BIT_6_SWIZZLE_9:
2134 		return "bit9";
2135 	case I915_BIT_6_SWIZZLE_9_10:
2136 		return "bit9/bit10";
2137 	case I915_BIT_6_SWIZZLE_9_11:
2138 		return "bit9/bit11";
2139 	case I915_BIT_6_SWIZZLE_9_10_11:
2140 		return "bit9/bit10/bit11";
2141 	case I915_BIT_6_SWIZZLE_9_17:
2142 		return "bit9/bit17";
2143 	case I915_BIT_6_SWIZZLE_9_10_17:
2144 		return "bit9/bit10/bit17";
2145 	case I915_BIT_6_SWIZZLE_UNKNOWN:
2146 		return "unknown";
2147 	}
2148 
2149 	return "bug";
2150 }
2151 
2152 static int i915_swizzle_info(struct seq_file *m, void *data)
2153 {
2154 	struct drm_info_node *node = m->private;
2155 	struct drm_device *dev = node->minor->dev;
2156 	struct drm_i915_private *dev_priv = dev->dev_private;
2157 	int ret;
2158 
2159 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2160 	if (ret)
2161 		return ret;
2162 	intel_runtime_pm_get(dev_priv);
2163 
2164 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2165 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2166 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2167 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2168 
2169 	if (IS_GEN3(dev) || IS_GEN4(dev)) {
2170 		seq_printf(m, "DDC = 0x%08x\n",
2171 			   I915_READ(DCC));
2172 		seq_printf(m, "DDC2 = 0x%08x\n",
2173 			   I915_READ(DCC2));
2174 		seq_printf(m, "C0DRB3 = 0x%04x\n",
2175 			   I915_READ16(C0DRB3));
2176 		seq_printf(m, "C1DRB3 = 0x%04x\n",
2177 			   I915_READ16(C1DRB3));
2178 	} else if (INTEL_INFO(dev)->gen >= 6) {
2179 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2180 			   I915_READ(MAD_DIMM_C0));
2181 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2182 			   I915_READ(MAD_DIMM_C1));
2183 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2184 			   I915_READ(MAD_DIMM_C2));
2185 		seq_printf(m, "TILECTL = 0x%08x\n",
2186 			   I915_READ(TILECTL));
2187 		if (INTEL_INFO(dev)->gen >= 8)
2188 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2189 				   I915_READ(GAMTARBMODE));
2190 		else
2191 			seq_printf(m, "ARB_MODE = 0x%08x\n",
2192 				   I915_READ(ARB_MODE));
2193 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2194 			   I915_READ(DISP_ARB_CTL));
2195 	}
2196 
2197 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2198 		seq_puts(m, "L-shaped memory detected\n");
2199 
2200 	intel_runtime_pm_put(dev_priv);
2201 	mutex_unlock(&dev->struct_mutex);
2202 
2203 	return 0;
2204 }
2205 
2206 static int per_file_ctx(int id, void *ptr, void *data)
2207 {
2208 	struct intel_context *ctx = ptr;
2209 	struct seq_file *m = data;
2210 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2211 
2212 	if (!ppgtt) {
2213 		seq_printf(m, "  no ppgtt for context %d\n",
2214 			   ctx->user_handle);
2215 		return 0;
2216 	}
2217 
2218 	if (i915_gem_context_is_default(ctx))
2219 		seq_puts(m, "  default context:\n");
2220 	else
2221 		seq_printf(m, "  context %d:\n", ctx->user_handle);
2222 	ppgtt->debug_dump(ppgtt, m);
2223 
2224 	return 0;
2225 }
2226 
2227 static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2228 {
2229 	struct drm_i915_private *dev_priv = dev->dev_private;
2230 	struct intel_engine_cs *ring;
2231 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2232 	int unused, i;
2233 
2234 	if (!ppgtt)
2235 		return;
2236 
2237 	for_each_ring(ring, dev_priv, unused) {
2238 		seq_printf(m, "%s\n", ring->name);
2239 		for (i = 0; i < 4; i++) {
2240 			u32 offset = 0x270 + i * 8;
2241 			u64 pdp = I915_READ(ring->mmio_base + offset + 4);
2242 			pdp <<= 32;
2243 			pdp |= I915_READ(ring->mmio_base + offset);
2244 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2245 		}
2246 	}
2247 }
2248 
2249 static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2250 {
2251 	struct drm_i915_private *dev_priv = dev->dev_private;
2252 	struct intel_engine_cs *ring;
2253 	struct drm_file *file;
2254 	int i;
2255 
2256 	if (INTEL_INFO(dev)->gen == 6)
2257 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2258 
2259 	for_each_ring(ring, dev_priv, i) {
2260 		seq_printf(m, "%s\n", ring->name);
2261 		if (INTEL_INFO(dev)->gen == 7)
2262 			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
2263 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring)));
2264 		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring)));
2265 		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring)));
2266 	}
2267 	if (dev_priv->mm.aliasing_ppgtt) {
2268 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2269 
2270 		seq_puts(m, "aliasing PPGTT:\n");
2271 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2272 
2273 		ppgtt->debug_dump(ppgtt, m);
2274 	}
2275 
2276 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2277 		struct drm_i915_file_private *file_priv = file->driver_priv;
2278 
2279 		seq_printf(m, "proc: %s\n",
2280 			   get_pid_task(file->pid, PIDTYPE_PID)->comm);
2281 		idr_for_each(&file_priv->context_idr, per_file_ctx, m);
2282 	}
2283 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2284 }
2285 
2286 static int i915_ppgtt_info(struct seq_file *m, void *data)
2287 {
2288 	struct drm_info_node *node = m->private;
2289 	struct drm_device *dev = node->minor->dev;
2290 	struct drm_i915_private *dev_priv = dev->dev_private;
2291 
2292 	int ret = mutex_lock_interruptible(&dev->struct_mutex);
2293 	if (ret)
2294 		return ret;
2295 	intel_runtime_pm_get(dev_priv);
2296 
2297 	if (INTEL_INFO(dev)->gen >= 8)
2298 		gen8_ppgtt_info(m, dev);
2299 	else if (INTEL_INFO(dev)->gen >= 6)
2300 		gen6_ppgtt_info(m, dev);
2301 
2302 	intel_runtime_pm_put(dev_priv);
2303 	mutex_unlock(&dev->struct_mutex);
2304 
2305 	return 0;
2306 }
2307 
2308 static int count_irq_waiters(struct drm_i915_private *i915)
2309 {
2310 	struct intel_engine_cs *ring;
2311 	int count = 0;
2312 	int i;
2313 
2314 	for_each_ring(ring, i915, i)
2315 		count += ring->irq_refcount;
2316 
2317 	return count;
2318 }
2319 
2320 static int i915_rps_boost_info(struct seq_file *m, void *data)
2321 {
2322 	struct drm_info_node *node = m->private;
2323 	struct drm_device *dev = node->minor->dev;
2324 	struct drm_i915_private *dev_priv = dev->dev_private;
2325 	struct drm_file *file;
2326 
2327 	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
2328 	seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy);
2329 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2330 	seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2331 		   intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
2332 		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
2333 		   intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit),
2334 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit),
2335 		   intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
2336 	spin_lock(&dev_priv->rps.client_lock);
2337 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2338 		struct drm_i915_file_private *file_priv = file->driver_priv;
2339 		struct task_struct *task;
2340 
2341 		rcu_read_lock();
2342 		task = pid_task(file->pid, PIDTYPE_PID);
2343 		seq_printf(m, "%s [%d]: %d boosts%s\n",
2344 			   task ? task->comm : "<unknown>",
2345 			   task ? task->pid : -1,
2346 			   file_priv->rps.boosts,
2347 			   list_empty(&file_priv->rps.link) ? "" : ", active");
2348 		rcu_read_unlock();
2349 	}
2350 	seq_printf(m, "Semaphore boosts: %d%s\n",
2351 		   dev_priv->rps.semaphores.boosts,
2352 		   list_empty(&dev_priv->rps.semaphores.link) ? "" : ", active");
2353 	seq_printf(m, "MMIO flip boosts: %d%s\n",
2354 		   dev_priv->rps.mmioflips.boosts,
2355 		   list_empty(&dev_priv->rps.mmioflips.link) ? "" : ", active");
2356 	seq_printf(m, "Kernel boosts: %d\n", dev_priv->rps.boosts);
2357 	spin_unlock(&dev_priv->rps.client_lock);
2358 
2359 	return 0;
2360 }
2361 
2362 static int i915_llc(struct seq_file *m, void *data)
2363 {
2364 	struct drm_info_node *node = m->private;
2365 	struct drm_device *dev = node->minor->dev;
2366 	struct drm_i915_private *dev_priv = dev->dev_private;
2367 
2368 	/* Size calculation for LLC is a bit of a pain. Ignore for now. */
2369 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
2370 	seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
2371 
2372 	return 0;
2373 }
2374 
2375 static int i915_edp_psr_status(struct seq_file *m, void *data)
2376 {
2377 	struct drm_info_node *node = m->private;
2378 	struct drm_device *dev = node->minor->dev;
2379 	struct drm_i915_private *dev_priv = dev->dev_private;
2380 	u32 psrperf = 0;
2381 	u32 stat[3];
2382 	enum pipe pipe;
2383 	bool enabled = false;
2384 
2385 	if (!HAS_PSR(dev)) {
2386 		seq_puts(m, "PSR not supported\n");
2387 		return 0;
2388 	}
2389 
2390 	intel_runtime_pm_get(dev_priv);
2391 
2392 	mutex_lock(&dev_priv->psr.lock);
2393 	seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
2394 	seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
2395 	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
2396 	seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
2397 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2398 		   dev_priv->psr.busy_frontbuffer_bits);
2399 	seq_printf(m, "Re-enable work scheduled: %s\n",
2400 		   yesno(work_busy(&dev_priv->psr.work.work)));
2401 
2402 	if (HAS_DDI(dev))
2403 		enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
2404 	else {
2405 		for_each_pipe(dev_priv, pipe) {
2406 			stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
2407 				VLV_EDP_PSR_CURR_STATE_MASK;
2408 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2409 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2410 				enabled = true;
2411 		}
2412 	}
2413 	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
2414 
2415 	if (!HAS_DDI(dev))
2416 		for_each_pipe(dev_priv, pipe) {
2417 			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
2418 			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
2419 				seq_printf(m, " pipe %c", pipe_name(pipe));
2420 		}
2421 	seq_puts(m, "\n");
2422 
2423 	/* CHV PSR has no kind of performance counter */
2424 	if (HAS_DDI(dev)) {
2425 		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
2426 			EDP_PSR_PERF_CNT_MASK;
2427 
2428 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2429 	}
2430 	mutex_unlock(&dev_priv->psr.lock);
2431 
2432 	intel_runtime_pm_put(dev_priv);
2433 	return 0;
2434 }
2435 
2436 static int i915_sink_crc(struct seq_file *m, void *data)
2437 {
2438 	struct drm_info_node *node = m->private;
2439 	struct drm_device *dev = node->minor->dev;
2440 	struct intel_encoder *encoder;
2441 	struct intel_connector *connector;
2442 	struct intel_dp *intel_dp = NULL;
2443 	int ret;
2444 	u8 crc[6];
2445 
2446 	drm_modeset_lock_all(dev);
2447 	for_each_intel_connector(dev, connector) {
2448 
2449 		if (connector->base.dpms != DRM_MODE_DPMS_ON)
2450 			continue;
2451 
2452 		if (!connector->base.encoder)
2453 			continue;
2454 
2455 		encoder = to_intel_encoder(connector->base.encoder);
2456 		if (encoder->type != INTEL_OUTPUT_EDP)
2457 			continue;
2458 
2459 		intel_dp = enc_to_intel_dp(&encoder->base);
2460 
2461 		ret = intel_dp_sink_crc(intel_dp, crc);
2462 		if (ret)
2463 			goto out;
2464 
2465 		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
2466 			   crc[0], crc[1], crc[2],
2467 			   crc[3], crc[4], crc[5]);
2468 		goto out;
2469 	}
2470 	ret = -ENODEV;
2471 out:
2472 	drm_modeset_unlock_all(dev);
2473 	return ret;
2474 }
2475 
2476 static int i915_energy_uJ(struct seq_file *m, void *data)
2477 {
2478 	struct drm_info_node *node = m->private;
2479 	struct drm_device *dev = node->minor->dev;
2480 	struct drm_i915_private *dev_priv = dev->dev_private;
2481 	u64 power;
2482 	u32 units;
2483 
2484 	if (INTEL_INFO(dev)->gen < 6)
2485 		return -ENODEV;
2486 
2487 	intel_runtime_pm_get(dev_priv);
2488 
2489 	rdmsrl(MSR_RAPL_POWER_UNIT, power);
2490 	power = (power & 0x1f00) >> 8;
2491 	units = 1000000 / (1 << power); /* convert to uJ */
2492 	power = I915_READ(MCH_SECP_NRG_STTS);
2493 	power *= units;
2494 
2495 	intel_runtime_pm_put(dev_priv);
2496 
2497 	seq_printf(m, "%llu", (long long unsigned)power);
2498 
2499 	return 0;
2500 }
2501 
2502 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2503 {
2504 	struct drm_info_node *node = m->private;
2505 	struct drm_device *dev = node->minor->dev;
2506 	struct drm_i915_private *dev_priv = dev->dev_private;
2507 
2508 	if (!HAS_RUNTIME_PM(dev)) {
2509 		seq_puts(m, "not supported\n");
2510 		return 0;
2511 	}
2512 
2513 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
2514 	seq_printf(m, "IRQs disabled: %s\n",
2515 		   yesno(!intel_irqs_enabled(dev_priv)));
2516 #ifdef CONFIG_PM
2517 	seq_printf(m, "Usage count: %d\n",
2518 		   atomic_read(&dev->dev->power.usage_count));
2519 #else
2520 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2521 #endif
2522 
2523 	return 0;
2524 }
2525 
2526 static const char *power_domain_str(enum intel_display_power_domain domain)
2527 {
2528 	switch (domain) {
2529 	case POWER_DOMAIN_PIPE_A:
2530 		return "PIPE_A";
2531 	case POWER_DOMAIN_PIPE_B:
2532 		return "PIPE_B";
2533 	case POWER_DOMAIN_PIPE_C:
2534 		return "PIPE_C";
2535 	case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
2536 		return "PIPE_A_PANEL_FITTER";
2537 	case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
2538 		return "PIPE_B_PANEL_FITTER";
2539 	case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
2540 		return "PIPE_C_PANEL_FITTER";
2541 	case POWER_DOMAIN_TRANSCODER_A:
2542 		return "TRANSCODER_A";
2543 	case POWER_DOMAIN_TRANSCODER_B:
2544 		return "TRANSCODER_B";
2545 	case POWER_DOMAIN_TRANSCODER_C:
2546 		return "TRANSCODER_C";
2547 	case POWER_DOMAIN_TRANSCODER_EDP:
2548 		return "TRANSCODER_EDP";
2549 	case POWER_DOMAIN_PORT_DDI_A_2_LANES:
2550 		return "PORT_DDI_A_2_LANES";
2551 	case POWER_DOMAIN_PORT_DDI_A_4_LANES:
2552 		return "PORT_DDI_A_4_LANES";
2553 	case POWER_DOMAIN_PORT_DDI_B_2_LANES:
2554 		return "PORT_DDI_B_2_LANES";
2555 	case POWER_DOMAIN_PORT_DDI_B_4_LANES:
2556 		return "PORT_DDI_B_4_LANES";
2557 	case POWER_DOMAIN_PORT_DDI_C_2_LANES:
2558 		return "PORT_DDI_C_2_LANES";
2559 	case POWER_DOMAIN_PORT_DDI_C_4_LANES:
2560 		return "PORT_DDI_C_4_LANES";
2561 	case POWER_DOMAIN_PORT_DDI_D_2_LANES:
2562 		return "PORT_DDI_D_2_LANES";
2563 	case POWER_DOMAIN_PORT_DDI_D_4_LANES:
2564 		return "PORT_DDI_D_4_LANES";
2565 	case POWER_DOMAIN_PORT_DSI:
2566 		return "PORT_DSI";
2567 	case POWER_DOMAIN_PORT_CRT:
2568 		return "PORT_CRT";
2569 	case POWER_DOMAIN_PORT_OTHER:
2570 		return "PORT_OTHER";
2571 	case POWER_DOMAIN_VGA:
2572 		return "VGA";
2573 	case POWER_DOMAIN_AUDIO:
2574 		return "AUDIO";
2575 	case POWER_DOMAIN_PLLS:
2576 		return "PLLS";
2577 	case POWER_DOMAIN_AUX_A:
2578 		return "AUX_A";
2579 	case POWER_DOMAIN_AUX_B:
2580 		return "AUX_B";
2581 	case POWER_DOMAIN_AUX_C:
2582 		return "AUX_C";
2583 	case POWER_DOMAIN_AUX_D:
2584 		return "AUX_D";
2585 	case POWER_DOMAIN_INIT:
2586 		return "INIT";
2587 	default:
2588 		MISSING_CASE(domain);
2589 		return "?";
2590 	}
2591 }
2592 
2593 static int i915_power_domain_info(struct seq_file *m, void *unused)
2594 {
2595 	struct drm_info_node *node = m->private;
2596 	struct drm_device *dev = node->minor->dev;
2597 	struct drm_i915_private *dev_priv = dev->dev_private;
2598 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2599 	int i;
2600 
2601 	mutex_lock(&power_domains->lock);
2602 
2603 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2604 	for (i = 0; i < power_domains->power_well_count; i++) {
2605 		struct i915_power_well *power_well;
2606 		enum intel_display_power_domain power_domain;
2607 
2608 		power_well = &power_domains->power_wells[i];
2609 		seq_printf(m, "%-25s %d\n", power_well->name,
2610 			   power_well->count);
2611 
2612 		for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
2613 		     power_domain++) {
2614 			if (!(BIT(power_domain) & power_well->domains))
2615 				continue;
2616 
2617 			seq_printf(m, "  %-23s %d\n",
2618 				 power_domain_str(power_domain),
2619 				 power_domains->domain_use_count[power_domain]);
2620 		}
2621 	}
2622 
2623 	mutex_unlock(&power_domains->lock);
2624 
2625 	return 0;
2626 }
2627 
2628 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2629 				 struct drm_display_mode *mode)
2630 {
2631 	int i;
2632 
2633 	for (i = 0; i < tabs; i++)
2634 		seq_putc(m, '\t');
2635 
2636 	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2637 		   mode->base.id, mode->name,
2638 		   mode->vrefresh, mode->clock,
2639 		   mode->hdisplay, mode->hsync_start,
2640 		   mode->hsync_end, mode->htotal,
2641 		   mode->vdisplay, mode->vsync_start,
2642 		   mode->vsync_end, mode->vtotal,
2643 		   mode->type, mode->flags);
2644 }
2645 
2646 static void intel_encoder_info(struct seq_file *m,
2647 			       struct intel_crtc *intel_crtc,
2648 			       struct intel_encoder *intel_encoder)
2649 {
2650 	struct drm_info_node *node = m->private;
2651 	struct drm_device *dev = node->minor->dev;
2652 	struct drm_crtc *crtc = &intel_crtc->base;
2653 	struct intel_connector *intel_connector;
2654 	struct drm_encoder *encoder;
2655 
2656 	encoder = &intel_encoder->base;
2657 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2658 		   encoder->base.id, encoder->name);
2659 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2660 		struct drm_connector *connector = &intel_connector->base;
2661 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2662 			   connector->base.id,
2663 			   connector->name,
2664 			   drm_get_connector_status_name(connector->status));
2665 		if (connector->status == connector_status_connected) {
2666 			struct drm_display_mode *mode = &crtc->mode;
2667 			seq_printf(m, ", mode:\n");
2668 			intel_seq_print_mode(m, 2, mode);
2669 		} else {
2670 			seq_putc(m, '\n');
2671 		}
2672 	}
2673 }
2674 
2675 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2676 {
2677 	struct drm_info_node *node = m->private;
2678 	struct drm_device *dev = node->minor->dev;
2679 	struct drm_crtc *crtc = &intel_crtc->base;
2680 	struct intel_encoder *intel_encoder;
2681 
2682 	if (crtc->primary->fb)
2683 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2684 			   crtc->primary->fb->base.id, crtc->x, crtc->y,
2685 			   crtc->primary->fb->width, crtc->primary->fb->height);
2686 	else
2687 		seq_puts(m, "\tprimary plane disabled\n");
2688 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2689 		intel_encoder_info(m, intel_crtc, intel_encoder);
2690 }
2691 
2692 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2693 {
2694 	struct drm_display_mode *mode = panel->fixed_mode;
2695 
2696 	seq_printf(m, "\tfixed mode:\n");
2697 	intel_seq_print_mode(m, 2, mode);
2698 }
2699 
2700 static void intel_dp_info(struct seq_file *m,
2701 			  struct intel_connector *intel_connector)
2702 {
2703 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2704 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2705 
2706 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2707 	seq_printf(m, "\taudio support: %s\n", intel_dp->has_audio ? "yes" :
2708 		   "no");
2709 	if (intel_encoder->type == INTEL_OUTPUT_EDP)
2710 		intel_panel_info(m, &intel_connector->panel);
2711 }
2712 
2713 static void intel_hdmi_info(struct seq_file *m,
2714 			    struct intel_connector *intel_connector)
2715 {
2716 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2717 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2718 
2719 	seq_printf(m, "\taudio support: %s\n", intel_hdmi->has_audio ? "yes" :
2720 		   "no");
2721 }
2722 
2723 static void intel_lvds_info(struct seq_file *m,
2724 			    struct intel_connector *intel_connector)
2725 {
2726 	intel_panel_info(m, &intel_connector->panel);
2727 }
2728 
2729 static void intel_connector_info(struct seq_file *m,
2730 				 struct drm_connector *connector)
2731 {
2732 	struct intel_connector *intel_connector = to_intel_connector(connector);
2733 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2734 	struct drm_display_mode *mode;
2735 
2736 	seq_printf(m, "connector %d: type %s, status: %s\n",
2737 		   connector->base.id, connector->name,
2738 		   drm_get_connector_status_name(connector->status));
2739 	if (connector->status == connector_status_connected) {
2740 		seq_printf(m, "\tname: %s\n", connector->display_info.name);
2741 		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2742 			   connector->display_info.width_mm,
2743 			   connector->display_info.height_mm);
2744 		seq_printf(m, "\tsubpixel order: %s\n",
2745 			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2746 		seq_printf(m, "\tCEA rev: %d\n",
2747 			   connector->display_info.cea_rev);
2748 	}
2749 	if (intel_encoder) {
2750 		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2751 		    intel_encoder->type == INTEL_OUTPUT_EDP)
2752 			intel_dp_info(m, intel_connector);
2753 		else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
2754 			intel_hdmi_info(m, intel_connector);
2755 		else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2756 			intel_lvds_info(m, intel_connector);
2757 	}
2758 
2759 	seq_printf(m, "\tmodes:\n");
2760 	list_for_each_entry(mode, &connector->modes, head)
2761 		intel_seq_print_mode(m, 2, mode);
2762 }
2763 
2764 static bool cursor_active(struct drm_device *dev, int pipe)
2765 {
2766 	struct drm_i915_private *dev_priv = dev->dev_private;
2767 	u32 state;
2768 
2769 	if (IS_845G(dev) || IS_I865G(dev))
2770 		state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
2771 	else
2772 		state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
2773 
2774 	return state;
2775 }
2776 
2777 static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2778 {
2779 	struct drm_i915_private *dev_priv = dev->dev_private;
2780 	u32 pos;
2781 
2782 	pos = I915_READ(CURPOS(pipe));
2783 
2784 	*x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
2785 	if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
2786 		*x = -*x;
2787 
2788 	*y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
2789 	if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
2790 		*y = -*y;
2791 
2792 	return cursor_active(dev, pipe);
2793 }
2794 
2795 static int i915_display_info(struct seq_file *m, void *unused)
2796 {
2797 	struct drm_info_node *node = m->private;
2798 	struct drm_device *dev = node->minor->dev;
2799 	struct drm_i915_private *dev_priv = dev->dev_private;
2800 	struct intel_crtc *crtc;
2801 	struct drm_connector *connector;
2802 
2803 	intel_runtime_pm_get(dev_priv);
2804 	drm_modeset_lock_all(dev);
2805 	seq_printf(m, "CRTC info\n");
2806 	seq_printf(m, "---------\n");
2807 	for_each_intel_crtc(dev, crtc) {
2808 		bool active;
2809 		struct intel_crtc_state *pipe_config;
2810 		int x, y;
2811 
2812 		pipe_config = to_intel_crtc_state(crtc->base.state);
2813 
2814 		seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
2815 			   crtc->base.base.id, pipe_name(crtc->pipe),
2816 			   yesno(pipe_config->base.active),
2817 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h);
2818 		if (pipe_config->base.active) {
2819 			intel_crtc_info(m, crtc);
2820 
2821 			active = cursor_position(dev, crtc->pipe, &x, &y);
2822 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
2823 				   yesno(crtc->cursor_base),
2824 				   x, y, crtc->base.cursor->state->crtc_w,
2825 				   crtc->base.cursor->state->crtc_h,
2826 				   crtc->cursor_addr, yesno(active));
2827 		}
2828 
2829 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2830 			   yesno(!crtc->cpu_fifo_underrun_disabled),
2831 			   yesno(!crtc->pch_fifo_underrun_disabled));
2832 	}
2833 
2834 	seq_printf(m, "\n");
2835 	seq_printf(m, "Connector info\n");
2836 	seq_printf(m, "--------------\n");
2837 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2838 		intel_connector_info(m, connector);
2839 	}
2840 	drm_modeset_unlock_all(dev);
2841 	intel_runtime_pm_put(dev_priv);
2842 
2843 	return 0;
2844 }
2845 
2846 static int i915_semaphore_status(struct seq_file *m, void *unused)
2847 {
2848 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2849 	struct drm_device *dev = node->minor->dev;
2850 	struct drm_i915_private *dev_priv = dev->dev_private;
2851 	struct intel_engine_cs *ring;
2852 	int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
2853 	int i, j, ret;
2854 
2855 	if (!i915_semaphore_is_enabled(dev)) {
2856 		seq_puts(m, "Semaphores are disabled\n");
2857 		return 0;
2858 	}
2859 
2860 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2861 	if (ret)
2862 		return ret;
2863 	intel_runtime_pm_get(dev_priv);
2864 
2865 	if (IS_BROADWELL(dev)) {
2866 		struct page *page;
2867 		uint64_t *seqno;
2868 
2869 		page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
2870 
2871 		seqno = (uint64_t *)kmap_atomic(page);
2872 		for_each_ring(ring, dev_priv, i) {
2873 			uint64_t offset;
2874 
2875 			seq_printf(m, "%s\n", ring->name);
2876 
2877 			seq_puts(m, "  Last signal:");
2878 			for (j = 0; j < num_rings; j++) {
2879 				offset = i * I915_NUM_RINGS + j;
2880 				seq_printf(m, "0x%08llx (0x%02llx) ",
2881 					   seqno[offset], offset * 8);
2882 			}
2883 			seq_putc(m, '\n');
2884 
2885 			seq_puts(m, "  Last wait:  ");
2886 			for (j = 0; j < num_rings; j++) {
2887 				offset = i + (j * I915_NUM_RINGS);
2888 				seq_printf(m, "0x%08llx (0x%02llx) ",
2889 					   seqno[offset], offset * 8);
2890 			}
2891 			seq_putc(m, '\n');
2892 
2893 		}
2894 		kunmap_atomic(seqno);
2895 	} else {
2896 		seq_puts(m, "  Last signal:");
2897 		for_each_ring(ring, dev_priv, i)
2898 			for (j = 0; j < num_rings; j++)
2899 				seq_printf(m, "0x%08x\n",
2900 					   I915_READ(ring->semaphore.mbox.signal[j]));
2901 		seq_putc(m, '\n');
2902 	}
2903 
2904 	seq_puts(m, "\nSync seqno:\n");
2905 	for_each_ring(ring, dev_priv, i) {
2906 		for (j = 0; j < num_rings; j++) {
2907 			seq_printf(m, "  0x%08x ", ring->semaphore.sync_seqno[j]);
2908 		}
2909 		seq_putc(m, '\n');
2910 	}
2911 	seq_putc(m, '\n');
2912 
2913 	intel_runtime_pm_put(dev_priv);
2914 	mutex_unlock(&dev->struct_mutex);
2915 	return 0;
2916 }
2917 
2918 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2919 {
2920 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2921 	struct drm_device *dev = node->minor->dev;
2922 	struct drm_i915_private *dev_priv = dev->dev_private;
2923 	int i;
2924 
2925 	drm_modeset_lock_all(dev);
2926 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2927 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2928 
2929 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
2930 		seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
2931 			   pll->config.crtc_mask, pll->active, yesno(pll->on));
2932 		seq_printf(m, " tracked hardware state:\n");
2933 		seq_printf(m, " dpll:    0x%08x\n", pll->config.hw_state.dpll);
2934 		seq_printf(m, " dpll_md: 0x%08x\n",
2935 			   pll->config.hw_state.dpll_md);
2936 		seq_printf(m, " fp0:     0x%08x\n", pll->config.hw_state.fp0);
2937 		seq_printf(m, " fp1:     0x%08x\n", pll->config.hw_state.fp1);
2938 		seq_printf(m, " wrpll:   0x%08x\n", pll->config.hw_state.wrpll);
2939 	}
2940 	drm_modeset_unlock_all(dev);
2941 
2942 	return 0;
2943 }
2944 
2945 static int i915_wa_registers(struct seq_file *m, void *unused)
2946 {
2947 	int i;
2948 	int ret;
2949 	struct drm_info_node *node = (struct drm_info_node *) m->private;
2950 	struct drm_device *dev = node->minor->dev;
2951 	struct drm_i915_private *dev_priv = dev->dev_private;
2952 
2953 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2954 	if (ret)
2955 		return ret;
2956 
2957 	intel_runtime_pm_get(dev_priv);
2958 
2959 	seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
2960 	for (i = 0; i < dev_priv->workarounds.count; ++i) {
2961 		u32 addr, mask, value, read;
2962 		bool ok;
2963 
2964 		addr = dev_priv->workarounds.reg[i].addr;
2965 		mask = dev_priv->workarounds.reg[i].mask;
2966 		value = dev_priv->workarounds.reg[i].value;
2967 		read = I915_READ(addr);
2968 		ok = (value & mask) == (read & mask);
2969 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
2970 			   addr, value, mask, read, ok ? "OK" : "FAIL");
2971 	}
2972 
2973 	intel_runtime_pm_put(dev_priv);
2974 	mutex_unlock(&dev->struct_mutex);
2975 
2976 	return 0;
2977 }
2978 
2979 static int i915_ddb_info(struct seq_file *m, void *unused)
2980 {
2981 	struct drm_info_node *node = m->private;
2982 	struct drm_device *dev = node->minor->dev;
2983 	struct drm_i915_private *dev_priv = dev->dev_private;
2984 	struct skl_ddb_allocation *ddb;
2985 	struct skl_ddb_entry *entry;
2986 	enum pipe pipe;
2987 	int plane;
2988 
2989 	if (INTEL_INFO(dev)->gen < 9)
2990 		return 0;
2991 
2992 	drm_modeset_lock_all(dev);
2993 
2994 	ddb = &dev_priv->wm.skl_hw.ddb;
2995 
2996 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2997 
2998 	for_each_pipe(dev_priv, pipe) {
2999 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3000 
3001 		for_each_plane(dev_priv, pipe, plane) {
3002 			entry = &ddb->plane[pipe][plane];
3003 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3004 				   entry->start, entry->end,
3005 				   skl_ddb_entry_size(entry));
3006 		}
3007 
3008 		entry = &ddb->cursor[pipe];
3009 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3010 			   entry->end, skl_ddb_entry_size(entry));
3011 	}
3012 
3013 	drm_modeset_unlock_all(dev);
3014 
3015 	return 0;
3016 }
3017 
3018 static void drrs_status_per_crtc(struct seq_file *m,
3019 		struct drm_device *dev, struct intel_crtc *intel_crtc)
3020 {
3021 	struct intel_encoder *intel_encoder;
3022 	struct drm_i915_private *dev_priv = dev->dev_private;
3023 	struct i915_drrs *drrs = &dev_priv->drrs;
3024 	int vrefresh = 0;
3025 
3026 	for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) {
3027 		/* Encoder connected on this CRTC */
3028 		switch (intel_encoder->type) {
3029 		case INTEL_OUTPUT_EDP:
3030 			seq_puts(m, "eDP:\n");
3031 			break;
3032 		case INTEL_OUTPUT_DSI:
3033 			seq_puts(m, "DSI:\n");
3034 			break;
3035 		case INTEL_OUTPUT_HDMI:
3036 			seq_puts(m, "HDMI:\n");
3037 			break;
3038 		case INTEL_OUTPUT_DISPLAYPORT:
3039 			seq_puts(m, "DP:\n");
3040 			break;
3041 		default:
3042 			seq_printf(m, "Other encoder (id=%d).\n",
3043 						intel_encoder->type);
3044 			return;
3045 		}
3046 	}
3047 
3048 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3049 		seq_puts(m, "\tVBT: DRRS_type: Static");
3050 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3051 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3052 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3053 		seq_puts(m, "\tVBT: DRRS_type: None");
3054 	else
3055 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3056 
3057 	seq_puts(m, "\n\n");
3058 
3059 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3060 		struct intel_panel *panel;
3061 
3062 		mutex_lock(&drrs->mutex);
3063 		/* DRRS Supported */
3064 		seq_puts(m, "\tDRRS Supported: Yes\n");
3065 
3066 		/* disable_drrs() will make drrs->dp NULL */
3067 		if (!drrs->dp) {
3068 			seq_puts(m, "Idleness DRRS: Disabled");
3069 			mutex_unlock(&drrs->mutex);
3070 			return;
3071 		}
3072 
3073 		panel = &drrs->dp->attached_connector->panel;
3074 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3075 					drrs->busy_frontbuffer_bits);
3076 
3077 		seq_puts(m, "\n\t\t");
3078 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3079 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3080 			vrefresh = panel->fixed_mode->vrefresh;
3081 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3082 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3083 			vrefresh = panel->downclock_mode->vrefresh;
3084 		} else {
3085 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3086 						drrs->refresh_rate_type);
3087 			mutex_unlock(&drrs->mutex);
3088 			return;
3089 		}
3090 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3091 
3092 		seq_puts(m, "\n\t\t");
3093 		mutex_unlock(&drrs->mutex);
3094 	} else {
3095 		/* DRRS not supported. Print the VBT parameter*/
3096 		seq_puts(m, "\tDRRS Supported : No");
3097 	}
3098 	seq_puts(m, "\n");
3099 }
3100 
3101 static int i915_drrs_status(struct seq_file *m, void *unused)
3102 {
3103 	struct drm_info_node *node = m->private;
3104 	struct drm_device *dev = node->minor->dev;
3105 	struct intel_crtc *intel_crtc;
3106 	int active_crtc_cnt = 0;
3107 
3108 	for_each_intel_crtc(dev, intel_crtc) {
3109 		drm_modeset_lock(&intel_crtc->base.mutex, NULL);
3110 
3111 		if (intel_crtc->base.state->active) {
3112 			active_crtc_cnt++;
3113 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3114 
3115 			drrs_status_per_crtc(m, dev, intel_crtc);
3116 		}
3117 
3118 		drm_modeset_unlock(&intel_crtc->base.mutex);
3119 	}
3120 
3121 	if (!active_crtc_cnt)
3122 		seq_puts(m, "No active crtc found\n");
3123 
3124 	return 0;
3125 }
3126 
3127 struct pipe_crc_info {
3128 	const char *name;
3129 	struct drm_device *dev;
3130 	enum pipe pipe;
3131 };
3132 
3133 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3134 {
3135 	struct drm_info_node *node = (struct drm_info_node *) m->private;
3136 	struct drm_device *dev = node->minor->dev;
3137 	struct drm_encoder *encoder;
3138 	struct intel_encoder *intel_encoder;
3139 	struct intel_digital_port *intel_dig_port;
3140 	drm_modeset_lock_all(dev);
3141 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3142 		intel_encoder = to_intel_encoder(encoder);
3143 		if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
3144 			continue;
3145 		intel_dig_port = enc_to_dig_port(encoder);
3146 		if (!intel_dig_port->dp.can_mst)
3147 			continue;
3148 
3149 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3150 	}
3151 	drm_modeset_unlock_all(dev);
3152 	return 0;
3153 }
3154 
3155 static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
3156 {
3157 	struct pipe_crc_info *info = inode->i_private;
3158 	struct drm_i915_private *dev_priv = info->dev->dev_private;
3159 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3160 
3161 	if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
3162 		return -ENODEV;
3163 
3164 	spin_lock_irq(&pipe_crc->lock);
3165 
3166 	if (pipe_crc->opened) {
3167 		spin_unlock_irq(&pipe_crc->lock);
3168 		return -EBUSY; /* already open */
3169 	}
3170 
3171 	pipe_crc->opened = true;
3172 	filep->private_data = inode->i_private;
3173 
3174 	spin_unlock_irq(&pipe_crc->lock);
3175 
3176 	return 0;
3177 }
3178 
3179 static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
3180 {
3181 	struct pipe_crc_info *info = inode->i_private;
3182 	struct drm_i915_private *dev_priv = info->dev->dev_private;
3183 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3184 
3185 	spin_lock_irq(&pipe_crc->lock);
3186 	pipe_crc->opened = false;
3187 	spin_unlock_irq(&pipe_crc->lock);
3188 
3189 	return 0;
3190 }
3191 
3192 /* (6 fields, 8 chars each, space separated (5) + '\n') */
3193 #define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
3194 /* account for \'0' */
3195 #define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
3196 
3197 static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
3198 {
3199 	assert_spin_locked(&pipe_crc->lock);
3200 	return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3201 			INTEL_PIPE_CRC_ENTRIES_NR);
3202 }
3203 
3204 static ssize_t
3205 i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
3206 		   loff_t *pos)
3207 {
3208 	struct pipe_crc_info *info = filep->private_data;
3209 	struct drm_device *dev = info->dev;
3210 	struct drm_i915_private *dev_priv = dev->dev_private;
3211 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3212 	char buf[PIPE_CRC_BUFFER_LEN];
3213 	int n_entries;
3214 	ssize_t bytes_read;
3215 
3216 	/*
3217 	 * Don't allow user space to provide buffers not big enough to hold
3218 	 * a line of data.
3219 	 */
3220 	if (count < PIPE_CRC_LINE_LEN)
3221 		return -EINVAL;
3222 
3223 	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
3224 		return 0;
3225 
3226 	/* nothing to read */
3227 	spin_lock_irq(&pipe_crc->lock);
3228 	while (pipe_crc_data_count(pipe_crc) == 0) {
3229 		int ret;
3230 
3231 		if (filep->f_flags & O_NONBLOCK) {
3232 			spin_unlock_irq(&pipe_crc->lock);
3233 			return -EAGAIN;
3234 		}
3235 
3236 		ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
3237 				pipe_crc_data_count(pipe_crc), pipe_crc->lock);
3238 		if (ret) {
3239 			spin_unlock_irq(&pipe_crc->lock);
3240 			return ret;
3241 		}
3242 	}
3243 
3244 	/* We now have one or more entries to read */
3245 	n_entries = count / PIPE_CRC_LINE_LEN;
3246 
3247 	bytes_read = 0;
3248 	while (n_entries > 0) {
3249 		struct intel_pipe_crc_entry *entry =
3250 			&pipe_crc->entries[pipe_crc->tail];
3251 		int ret;
3252 
3253 		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
3254 			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
3255 			break;
3256 
3257 		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
3258 		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
3259 
3260 		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
3261 				       "%8u %8x %8x %8x %8x %8x\n",
3262 				       entry->frame, entry->crc[0],
3263 				       entry->crc[1], entry->crc[2],
3264 				       entry->crc[3], entry->crc[4]);
3265 
3266 		spin_unlock_irq(&pipe_crc->lock);
3267 
3268 		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
3269 		if (ret == PIPE_CRC_LINE_LEN)
3270 			return -EFAULT;
3271 
3272 		user_buf += PIPE_CRC_LINE_LEN;
3273 		n_entries--;
3274 
3275 		spin_lock_irq(&pipe_crc->lock);
3276 	}
3277 
3278 	spin_unlock_irq(&pipe_crc->lock);
3279 
3280 	return bytes_read;
3281 }
3282 
3283 static const struct file_operations i915_pipe_crc_fops = {
3284 	.owner = THIS_MODULE,
3285 	.open = i915_pipe_crc_open,
3286 	.read = i915_pipe_crc_read,
3287 	.release = i915_pipe_crc_release,
3288 };
3289 
3290 static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
3291 	{
3292 		.name = "i915_pipe_A_crc",
3293 		.pipe = PIPE_A,
3294 	},
3295 	{
3296 		.name = "i915_pipe_B_crc",
3297 		.pipe = PIPE_B,
3298 	},
3299 	{
3300 		.name = "i915_pipe_C_crc",
3301 		.pipe = PIPE_C,
3302 	},
3303 };
3304 
3305 static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
3306 				enum pipe pipe)
3307 {
3308 	struct drm_device *dev = minor->dev;
3309 	struct dentry *ent;
3310 	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
3311 
3312 	info->dev = dev;
3313 	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
3314 				  &i915_pipe_crc_fops);
3315 	if (!ent)
3316 		return -ENOMEM;
3317 
3318 	return drm_add_fake_info_node(minor, ent, info);
3319 }
3320 
3321 static const char * const pipe_crc_sources[] = {
3322 	"none",
3323 	"plane1",
3324 	"plane2",
3325 	"pf",
3326 	"pipe",
3327 	"TV",
3328 	"DP-B",
3329 	"DP-C",
3330 	"DP-D",
3331 	"auto",
3332 };
3333 
3334 static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
3335 {
3336 	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
3337 	return pipe_crc_sources[source];
3338 }
3339 
3340 static int display_crc_ctl_show(struct seq_file *m, void *data)
3341 {
3342 	struct drm_device *dev = m->private;
3343 	struct drm_i915_private *dev_priv = dev->dev_private;
3344 	int i;
3345 
3346 	for (i = 0; i < I915_MAX_PIPES; i++)
3347 		seq_printf(m, "%c %s\n", pipe_name(i),
3348 			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
3349 
3350 	return 0;
3351 }
3352 
3353 static int display_crc_ctl_open(struct inode *inode, struct file *file)
3354 {
3355 	struct drm_device *dev = inode->i_private;
3356 
3357 	return single_open(file, display_crc_ctl_show, dev);
3358 }
3359 
3360 static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3361 				 uint32_t *val)
3362 {
3363 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3364 		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3365 
3366 	switch (*source) {
3367 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3368 		*val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
3369 		break;
3370 	case INTEL_PIPE_CRC_SOURCE_NONE:
3371 		*val = 0;
3372 		break;
3373 	default:
3374 		return -EINVAL;
3375 	}
3376 
3377 	return 0;
3378 }
3379 
3380 static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
3381 				     enum intel_pipe_crc_source *source)
3382 {
3383 	struct intel_encoder *encoder;
3384 	struct intel_crtc *crtc;
3385 	struct intel_digital_port *dig_port;
3386 	int ret = 0;
3387 
3388 	*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3389 
3390 	drm_modeset_lock_all(dev);
3391 	for_each_intel_encoder(dev, encoder) {
3392 		if (!encoder->base.crtc)
3393 			continue;
3394 
3395 		crtc = to_intel_crtc(encoder->base.crtc);
3396 
3397 		if (crtc->pipe != pipe)
3398 			continue;
3399 
3400 		switch (encoder->type) {
3401 		case INTEL_OUTPUT_TVOUT:
3402 			*source = INTEL_PIPE_CRC_SOURCE_TV;
3403 			break;
3404 		case INTEL_OUTPUT_DISPLAYPORT:
3405 		case INTEL_OUTPUT_EDP:
3406 			dig_port = enc_to_dig_port(&encoder->base);
3407 			switch (dig_port->port) {
3408 			case PORT_B:
3409 				*source = INTEL_PIPE_CRC_SOURCE_DP_B;
3410 				break;
3411 			case PORT_C:
3412 				*source = INTEL_PIPE_CRC_SOURCE_DP_C;
3413 				break;
3414 			case PORT_D:
3415 				*source = INTEL_PIPE_CRC_SOURCE_DP_D;
3416 				break;
3417 			default:
3418 				WARN(1, "nonexisting DP port %c\n",
3419 				     port_name(dig_port->port));
3420 				break;
3421 			}
3422 			break;
3423 		default:
3424 			break;
3425 		}
3426 	}
3427 	drm_modeset_unlock_all(dev);
3428 
3429 	return ret;
3430 }
3431 
3432 static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
3433 				enum pipe pipe,
3434 				enum intel_pipe_crc_source *source,
3435 				uint32_t *val)
3436 {
3437 	struct drm_i915_private *dev_priv = dev->dev_private;
3438 	bool need_stable_symbols = false;
3439 
3440 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3441 		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3442 		if (ret)
3443 			return ret;
3444 	}
3445 
3446 	switch (*source) {
3447 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3448 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
3449 		break;
3450 	case INTEL_PIPE_CRC_SOURCE_DP_B:
3451 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
3452 		need_stable_symbols = true;
3453 		break;
3454 	case INTEL_PIPE_CRC_SOURCE_DP_C:
3455 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
3456 		need_stable_symbols = true;
3457 		break;
3458 	case INTEL_PIPE_CRC_SOURCE_DP_D:
3459 		if (!IS_CHERRYVIEW(dev))
3460 			return -EINVAL;
3461 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
3462 		need_stable_symbols = true;
3463 		break;
3464 	case INTEL_PIPE_CRC_SOURCE_NONE:
3465 		*val = 0;
3466 		break;
3467 	default:
3468 		return -EINVAL;
3469 	}
3470 
3471 	/*
3472 	 * When the pipe CRC tap point is after the transcoders we need
3473 	 * to tweak symbol-level features to produce a deterministic series of
3474 	 * symbols for a given frame. We need to reset those features only once
3475 	 * a frame (instead of every nth symbol):
3476 	 *   - DC-balance: used to ensure a better clock recovery from the data
3477 	 *     link (SDVO)
3478 	 *   - DisplayPort scrambling: used for EMI reduction
3479 	 */
3480 	if (need_stable_symbols) {
3481 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3482 
3483 		tmp |= DC_BALANCE_RESET_VLV;
3484 		switch (pipe) {
3485 		case PIPE_A:
3486 			tmp |= PIPE_A_SCRAMBLE_RESET;
3487 			break;
3488 		case PIPE_B:
3489 			tmp |= PIPE_B_SCRAMBLE_RESET;
3490 			break;
3491 		case PIPE_C:
3492 			tmp |= PIPE_C_SCRAMBLE_RESET;
3493 			break;
3494 		default:
3495 			return -EINVAL;
3496 		}
3497 		I915_WRITE(PORT_DFT2_G4X, tmp);
3498 	}
3499 
3500 	return 0;
3501 }
3502 
3503 static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
3504 				 enum pipe pipe,
3505 				 enum intel_pipe_crc_source *source,
3506 				 uint32_t *val)
3507 {
3508 	struct drm_i915_private *dev_priv = dev->dev_private;
3509 	bool need_stable_symbols = false;
3510 
3511 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
3512 		int ret = i9xx_pipe_crc_auto_source(dev, pipe, source);
3513 		if (ret)
3514 			return ret;
3515 	}
3516 
3517 	switch (*source) {
3518 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3519 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
3520 		break;
3521 	case INTEL_PIPE_CRC_SOURCE_TV:
3522 		if (!SUPPORTS_TV(dev))
3523 			return -EINVAL;
3524 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
3525 		break;
3526 	case INTEL_PIPE_CRC_SOURCE_DP_B:
3527 		if (!IS_G4X(dev))
3528 			return -EINVAL;
3529 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
3530 		need_stable_symbols = true;
3531 		break;
3532 	case INTEL_PIPE_CRC_SOURCE_DP_C:
3533 		if (!IS_G4X(dev))
3534 			return -EINVAL;
3535 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
3536 		need_stable_symbols = true;
3537 		break;
3538 	case INTEL_PIPE_CRC_SOURCE_DP_D:
3539 		if (!IS_G4X(dev))
3540 			return -EINVAL;
3541 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
3542 		need_stable_symbols = true;
3543 		break;
3544 	case INTEL_PIPE_CRC_SOURCE_NONE:
3545 		*val = 0;
3546 		break;
3547 	default:
3548 		return -EINVAL;
3549 	}
3550 
3551 	/*
3552 	 * When the pipe CRC tap point is after the transcoders we need
3553 	 * to tweak symbol-level features to produce a deterministic series of
3554 	 * symbols for a given frame. We need to reset those features only once
3555 	 * a frame (instead of every nth symbol):
3556 	 *   - DC-balance: used to ensure a better clock recovery from the data
3557 	 *     link (SDVO)
3558 	 *   - DisplayPort scrambling: used for EMI reduction
3559 	 */
3560 	if (need_stable_symbols) {
3561 		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3562 
3563 		WARN_ON(!IS_G4X(dev));
3564 
3565 		I915_WRITE(PORT_DFT_I9XX,
3566 			   I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
3567 
3568 		if (pipe == PIPE_A)
3569 			tmp |= PIPE_A_SCRAMBLE_RESET;
3570 		else
3571 			tmp |= PIPE_B_SCRAMBLE_RESET;
3572 
3573 		I915_WRITE(PORT_DFT2_G4X, tmp);
3574 	}
3575 
3576 	return 0;
3577 }
3578 
3579 static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3580 					 enum pipe pipe)
3581 {
3582 	struct drm_i915_private *dev_priv = dev->dev_private;
3583 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3584 
3585 	switch (pipe) {
3586 	case PIPE_A:
3587 		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3588 		break;
3589 	case PIPE_B:
3590 		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3591 		break;
3592 	case PIPE_C:
3593 		tmp &= ~PIPE_C_SCRAMBLE_RESET;
3594 		break;
3595 	default:
3596 		return;
3597 	}
3598 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
3599 		tmp &= ~DC_BALANCE_RESET_VLV;
3600 	I915_WRITE(PORT_DFT2_G4X, tmp);
3601 
3602 }
3603 
3604 static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
3605 					 enum pipe pipe)
3606 {
3607 	struct drm_i915_private *dev_priv = dev->dev_private;
3608 	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3609 
3610 	if (pipe == PIPE_A)
3611 		tmp &= ~PIPE_A_SCRAMBLE_RESET;
3612 	else
3613 		tmp &= ~PIPE_B_SCRAMBLE_RESET;
3614 	I915_WRITE(PORT_DFT2_G4X, tmp);
3615 
3616 	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
3617 		I915_WRITE(PORT_DFT_I9XX,
3618 			   I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
3619 	}
3620 }
3621 
3622 static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3623 				uint32_t *val)
3624 {
3625 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3626 		*source = INTEL_PIPE_CRC_SOURCE_PIPE;
3627 
3628 	switch (*source) {
3629 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3630 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
3631 		break;
3632 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3633 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
3634 		break;
3635 	case INTEL_PIPE_CRC_SOURCE_PIPE:
3636 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
3637 		break;
3638 	case INTEL_PIPE_CRC_SOURCE_NONE:
3639 		*val = 0;
3640 		break;
3641 	default:
3642 		return -EINVAL;
3643 	}
3644 
3645 	return 0;
3646 }
3647 
3648 static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
3649 {
3650 	struct drm_i915_private *dev_priv = dev->dev_private;
3651 	struct intel_crtc *crtc =
3652 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3653 	struct intel_crtc_state *pipe_config;
3654 	struct drm_atomic_state *state;
3655 	int ret = 0;
3656 
3657 	drm_modeset_lock_all(dev);
3658 	state = drm_atomic_state_alloc(dev);
3659 	if (!state) {
3660 		ret = -ENOMEM;
3661 		goto out;
3662 	}
3663 
3664 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
3665 	pipe_config = intel_atomic_get_crtc_state(state, crtc);
3666 	if (IS_ERR(pipe_config)) {
3667 		ret = PTR_ERR(pipe_config);
3668 		goto out;
3669 	}
3670 
3671 	pipe_config->pch_pfit.force_thru = enable;
3672 	if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
3673 	    pipe_config->pch_pfit.enabled != enable)
3674 		pipe_config->base.connectors_changed = true;
3675 
3676 	ret = drm_atomic_commit(state);
3677 out:
3678 	drm_modeset_unlock_all(dev);
3679 	WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
3680 	if (ret)
3681 		drm_atomic_state_free(state);
3682 }
3683 
3684 static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
3685 				enum pipe pipe,
3686 				enum intel_pipe_crc_source *source,
3687 				uint32_t *val)
3688 {
3689 	if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
3690 		*source = INTEL_PIPE_CRC_SOURCE_PF;
3691 
3692 	switch (*source) {
3693 	case INTEL_PIPE_CRC_SOURCE_PLANE1:
3694 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
3695 		break;
3696 	case INTEL_PIPE_CRC_SOURCE_PLANE2:
3697 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
3698 		break;
3699 	case INTEL_PIPE_CRC_SOURCE_PF:
3700 		if (IS_HASWELL(dev) && pipe == PIPE_A)
3701 			hsw_trans_edp_pipe_A_crc_wa(dev, true);
3702 
3703 		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
3704 		break;
3705 	case INTEL_PIPE_CRC_SOURCE_NONE:
3706 		*val = 0;
3707 		break;
3708 	default:
3709 		return -EINVAL;
3710 	}
3711 
3712 	return 0;
3713 }
3714 
3715 static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3716 			       enum intel_pipe_crc_source source)
3717 {
3718 	struct drm_i915_private *dev_priv = dev->dev_private;
3719 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3720 	struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
3721 									pipe));
3722 	u32 val = 0; /* shut up gcc */
3723 	int ret;
3724 
3725 	if (pipe_crc->source == source)
3726 		return 0;
3727 
3728 	/* forbid changing the source without going back to 'none' */
3729 	if (pipe_crc->source && source)
3730 		return -EINVAL;
3731 
3732 	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
3733 		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
3734 		return -EIO;
3735 	}
3736 
3737 	if (IS_GEN2(dev))
3738 		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
3739 	else if (INTEL_INFO(dev)->gen < 5)
3740 		ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3741 	else if (IS_VALLEYVIEW(dev))
3742 		ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3743 	else if (IS_GEN5(dev) || IS_GEN6(dev))
3744 		ret = ilk_pipe_crc_ctl_reg(&source, &val);
3745 	else
3746 		ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
3747 
3748 	if (ret != 0)
3749 		return ret;
3750 
3751 	/* none -> real source transition */
3752 	if (source) {
3753 		struct intel_pipe_crc_entry *entries;
3754 
3755 		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
3756 				 pipe_name(pipe), pipe_crc_source_name(source));
3757 
3758 		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
3759 				  sizeof(pipe_crc->entries[0]),
3760 				  GFP_KERNEL);
3761 		if (!entries)
3762 			return -ENOMEM;
3763 
3764 		/*
3765 		 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
3766 		 * enabled and disabled dynamically based on package C states,
3767 		 * user space can't make reliable use of the CRCs, so let's just
3768 		 * completely disable it.
3769 		 */
3770 		hsw_disable_ips(crtc);
3771 
3772 		spin_lock_irq(&pipe_crc->lock);
3773 		kfree(pipe_crc->entries);
3774 		pipe_crc->entries = entries;
3775 		pipe_crc->head = 0;
3776 		pipe_crc->tail = 0;
3777 		spin_unlock_irq(&pipe_crc->lock);
3778 	}
3779 
3780 	pipe_crc->source = source;
3781 
3782 	I915_WRITE(PIPE_CRC_CTL(pipe), val);
3783 	POSTING_READ(PIPE_CRC_CTL(pipe));
3784 
3785 	/* real source -> none transition */
3786 	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
3787 		struct intel_pipe_crc_entry *entries;
3788 		struct intel_crtc *crtc =
3789 			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
3790 
3791 		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
3792 				 pipe_name(pipe));
3793 
3794 		drm_modeset_lock(&crtc->base.mutex, NULL);
3795 		if (crtc->base.state->active)
3796 			intel_wait_for_vblank(dev, pipe);
3797 		drm_modeset_unlock(&crtc->base.mutex);
3798 
3799 		spin_lock_irq(&pipe_crc->lock);
3800 		entries = pipe_crc->entries;
3801 		pipe_crc->entries = NULL;
3802 		pipe_crc->head = 0;
3803 		pipe_crc->tail = 0;
3804 		spin_unlock_irq(&pipe_crc->lock);
3805 
3806 		kfree(entries);
3807 
3808 		if (IS_G4X(dev))
3809 			g4x_undo_pipe_scramble_reset(dev, pipe);
3810 		else if (IS_VALLEYVIEW(dev))
3811 			vlv_undo_pipe_scramble_reset(dev, pipe);
3812 		else if (IS_HASWELL(dev) && pipe == PIPE_A)
3813 			hsw_trans_edp_pipe_A_crc_wa(dev, false);
3814 
3815 		hsw_enable_ips(crtc);
3816 	}
3817 
3818 	return 0;
3819 }
3820 
3821 /*
3822  * Parse pipe CRC command strings:
3823  *   command: wsp* object wsp+ name wsp+ source wsp*
3824  *   object: 'pipe'
3825  *   name: (A | B | C)
3826  *   source: (none | plane1 | plane2 | pf)
3827  *   wsp: (#0x20 | #0x9 | #0xA)+
3828  *
3829  * eg.:
3830  *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
3831  *  "pipe A none"    ->  Stop CRC
3832  */
3833 static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
3834 {
3835 	int n_words = 0;
3836 
3837 	while (*buf) {
3838 		char *end;
3839 
3840 		/* skip leading white space */
3841 		buf = skip_spaces(buf);
3842 		if (!*buf)
3843 			break;	/* end of buffer */
3844 
3845 		/* find end of word */
3846 		for (end = buf; *end && !isspace(*end); end++)
3847 			;
3848 
3849 		if (n_words == max_words) {
3850 			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
3851 					 max_words);
3852 			return -EINVAL;	/* ran out of words[] before bytes */
3853 		}
3854 
3855 		if (*end)
3856 			*end++ = '\0';
3857 		words[n_words++] = buf;
3858 		buf = end;
3859 	}
3860 
3861 	return n_words;
3862 }
3863 
3864 enum intel_pipe_crc_object {
3865 	PIPE_CRC_OBJECT_PIPE,
3866 };
3867 
3868 static const char * const pipe_crc_objects[] = {
3869 	"pipe",
3870 };
3871 
3872 static int
3873 display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
3874 {
3875 	int i;
3876 
3877 	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
3878 		if (!strcmp(buf, pipe_crc_objects[i])) {
3879 			*o = i;
3880 			return 0;
3881 		    }
3882 
3883 	return -EINVAL;
3884 }
3885 
3886 static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
3887 {
3888 	const char name = buf[0];
3889 
3890 	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
3891 		return -EINVAL;
3892 
3893 	*pipe = name - 'A';
3894 
3895 	return 0;
3896 }
3897 
3898 static int
3899 display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
3900 {
3901 	int i;
3902 
3903 	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
3904 		if (!strcmp(buf, pipe_crc_sources[i])) {
3905 			*s = i;
3906 			return 0;
3907 		    }
3908 
3909 	return -EINVAL;
3910 }
3911 
3912 static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
3913 {
3914 #define N_WORDS 3
3915 	int n_words;
3916 	char *words[N_WORDS];
3917 	enum pipe pipe;
3918 	enum intel_pipe_crc_object object;
3919 	enum intel_pipe_crc_source source;
3920 
3921 	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
3922 	if (n_words != N_WORDS) {
3923 		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
3924 				 N_WORDS);
3925 		return -EINVAL;
3926 	}
3927 
3928 	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
3929 		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
3930 		return -EINVAL;
3931 	}
3932 
3933 	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
3934 		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
3935 		return -EINVAL;
3936 	}
3937 
3938 	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
3939 		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
3940 		return -EINVAL;
3941 	}
3942 
3943 	return pipe_crc_set_source(dev, pipe, source);
3944 }
3945 
3946 static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
3947 				     size_t len, loff_t *offp)
3948 {
3949 	struct seq_file *m = file->private_data;
3950 	struct drm_device *dev = m->private;
3951 	char *tmpbuf;
3952 	int ret;
3953 
3954 	if (len == 0)
3955 		return 0;
3956 
3957 	if (len > PAGE_SIZE - 1) {
3958 		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
3959 				 PAGE_SIZE);
3960 		return -E2BIG;
3961 	}
3962 
3963 	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
3964 	if (!tmpbuf)
3965 		return -ENOMEM;
3966 
3967 	if (copy_from_user(tmpbuf, ubuf, len)) {
3968 		ret = -EFAULT;
3969 		goto out;
3970 	}
3971 	tmpbuf[len] = '\0';
3972 
3973 	ret = display_crc_ctl_parse(dev, tmpbuf, len);
3974 
3975 out:
3976 	kfree(tmpbuf);
3977 	if (ret < 0)
3978 		return ret;
3979 
3980 	*offp += len;
3981 	return len;
3982 }
3983 
3984 static const struct file_operations i915_display_crc_ctl_fops = {
3985 	.owner = THIS_MODULE,
3986 	.open = display_crc_ctl_open,
3987 	.read = seq_read,
3988 	.llseek = seq_lseek,
3989 	.release = single_release,
3990 	.write = display_crc_ctl_write
3991 };
3992 
3993 static ssize_t i915_displayport_test_active_write(struct file *file,
3994 					    const char __user *ubuf,
3995 					    size_t len, loff_t *offp)
3996 {
3997 	char *input_buffer;
3998 	int status = 0;
3999 	struct drm_device *dev;
4000 	struct drm_connector *connector;
4001 	struct list_head *connector_list;
4002 	struct intel_dp *intel_dp;
4003 	int val = 0;
4004 
4005 	dev = ((struct seq_file *)file->private_data)->private;
4006 
4007 	connector_list = &dev->mode_config.connector_list;
4008 
4009 	if (len == 0)
4010 		return 0;
4011 
4012 	input_buffer = kmalloc(len + 1, GFP_KERNEL);
4013 	if (!input_buffer)
4014 		return -ENOMEM;
4015 
4016 	if (copy_from_user(input_buffer, ubuf, len)) {
4017 		status = -EFAULT;
4018 		goto out;
4019 	}
4020 
4021 	input_buffer[len] = '\0';
4022 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
4023 
4024 	list_for_each_entry(connector, connector_list, head) {
4025 
4026 		if (connector->connector_type !=
4027 		    DRM_MODE_CONNECTOR_DisplayPort)
4028 			continue;
4029 
4030 		if (connector->status == connector_status_connected &&
4031 		    connector->encoder != NULL) {
4032 			intel_dp = enc_to_intel_dp(connector->encoder);
4033 			status = kstrtoint(input_buffer, 10, &val);
4034 			if (status < 0)
4035 				goto out;
4036 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
4037 			/* To prevent erroneous activation of the compliance
4038 			 * testing code, only accept an actual value of 1 here
4039 			 */
4040 			if (val == 1)
4041 				intel_dp->compliance_test_active = 1;
4042 			else
4043 				intel_dp->compliance_test_active = 0;
4044 		}
4045 	}
4046 out:
4047 	kfree(input_buffer);
4048 	if (status < 0)
4049 		return status;
4050 
4051 	*offp += len;
4052 	return len;
4053 }
4054 
4055 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
4056 {
4057 	struct drm_device *dev = m->private;
4058 	struct drm_connector *connector;
4059 	struct list_head *connector_list = &dev->mode_config.connector_list;
4060 	struct intel_dp *intel_dp;
4061 
4062 	list_for_each_entry(connector, connector_list, head) {
4063 
4064 		if (connector->connector_type !=
4065 		    DRM_MODE_CONNECTOR_DisplayPort)
4066 			continue;
4067 
4068 		if (connector->status == connector_status_connected &&
4069 		    connector->encoder != NULL) {
4070 			intel_dp = enc_to_intel_dp(connector->encoder);
4071 			if (intel_dp->compliance_test_active)
4072 				seq_puts(m, "1");
4073 			else
4074 				seq_puts(m, "0");
4075 		} else
4076 			seq_puts(m, "0");
4077 	}
4078 
4079 	return 0;
4080 }
4081 
4082 static int i915_displayport_test_active_open(struct inode *inode,
4083 				       struct file *file)
4084 {
4085 	struct drm_device *dev = inode->i_private;
4086 
4087 	return single_open(file, i915_displayport_test_active_show, dev);
4088 }
4089 
4090 static const struct file_operations i915_displayport_test_active_fops = {
4091 	.owner = THIS_MODULE,
4092 	.open = i915_displayport_test_active_open,
4093 	.read = seq_read,
4094 	.llseek = seq_lseek,
4095 	.release = single_release,
4096 	.write = i915_displayport_test_active_write
4097 };
4098 
4099 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
4100 {
4101 	struct drm_device *dev = m->private;
4102 	struct drm_connector *connector;
4103 	struct list_head *connector_list = &dev->mode_config.connector_list;
4104 	struct intel_dp *intel_dp;
4105 
4106 	list_for_each_entry(connector, connector_list, head) {
4107 
4108 		if (connector->connector_type !=
4109 		    DRM_MODE_CONNECTOR_DisplayPort)
4110 			continue;
4111 
4112 		if (connector->status == connector_status_connected &&
4113 		    connector->encoder != NULL) {
4114 			intel_dp = enc_to_intel_dp(connector->encoder);
4115 			seq_printf(m, "%lx", intel_dp->compliance_test_data);
4116 		} else
4117 			seq_puts(m, "0");
4118 	}
4119 
4120 	return 0;
4121 }
4122 static int i915_displayport_test_data_open(struct inode *inode,
4123 				       struct file *file)
4124 {
4125 	struct drm_device *dev = inode->i_private;
4126 
4127 	return single_open(file, i915_displayport_test_data_show, dev);
4128 }
4129 
4130 static const struct file_operations i915_displayport_test_data_fops = {
4131 	.owner = THIS_MODULE,
4132 	.open = i915_displayport_test_data_open,
4133 	.read = seq_read,
4134 	.llseek = seq_lseek,
4135 	.release = single_release
4136 };
4137 
4138 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
4139 {
4140 	struct drm_device *dev = m->private;
4141 	struct drm_connector *connector;
4142 	struct list_head *connector_list = &dev->mode_config.connector_list;
4143 	struct intel_dp *intel_dp;
4144 
4145 	list_for_each_entry(connector, connector_list, head) {
4146 
4147 		if (connector->connector_type !=
4148 		    DRM_MODE_CONNECTOR_DisplayPort)
4149 			continue;
4150 
4151 		if (connector->status == connector_status_connected &&
4152 		    connector->encoder != NULL) {
4153 			intel_dp = enc_to_intel_dp(connector->encoder);
4154 			seq_printf(m, "%02lx", intel_dp->compliance_test_type);
4155 		} else
4156 			seq_puts(m, "0");
4157 	}
4158 
4159 	return 0;
4160 }
4161 
4162 static int i915_displayport_test_type_open(struct inode *inode,
4163 				       struct file *file)
4164 {
4165 	struct drm_device *dev = inode->i_private;
4166 
4167 	return single_open(file, i915_displayport_test_type_show, dev);
4168 }
4169 
4170 static const struct file_operations i915_displayport_test_type_fops = {
4171 	.owner = THIS_MODULE,
4172 	.open = i915_displayport_test_type_open,
4173 	.read = seq_read,
4174 	.llseek = seq_lseek,
4175 	.release = single_release
4176 };
4177 
4178 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
4179 {
4180 	struct drm_device *dev = m->private;
4181 	int level;
4182 	int num_levels;
4183 
4184 	if (IS_CHERRYVIEW(dev))
4185 		num_levels = 3;
4186 	else if (IS_VALLEYVIEW(dev))
4187 		num_levels = 1;
4188 	else
4189 		num_levels = ilk_wm_max_level(dev) + 1;
4190 
4191 	drm_modeset_lock_all(dev);
4192 
4193 	for (level = 0; level < num_levels; level++) {
4194 		unsigned int latency = wm[level];
4195 
4196 		/*
4197 		 * - WM1+ latency values in 0.5us units
4198 		 * - latencies are in us on gen9/vlv/chv
4199 		 */
4200 		if (INTEL_INFO(dev)->gen >= 9 || IS_VALLEYVIEW(dev))
4201 			latency *= 10;
4202 		else if (level > 0)
4203 			latency *= 5;
4204 
4205 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
4206 			   level, wm[level], latency / 10, latency % 10);
4207 	}
4208 
4209 	drm_modeset_unlock_all(dev);
4210 }
4211 
4212 static int pri_wm_latency_show(struct seq_file *m, void *data)
4213 {
4214 	struct drm_device *dev = m->private;
4215 	struct drm_i915_private *dev_priv = dev->dev_private;
4216 	const uint16_t *latencies;
4217 
4218 	if (INTEL_INFO(dev)->gen >= 9)
4219 		latencies = dev_priv->wm.skl_latency;
4220 	else
4221 		latencies = to_i915(dev)->wm.pri_latency;
4222 
4223 	wm_latency_show(m, latencies);
4224 
4225 	return 0;
4226 }
4227 
4228 static int spr_wm_latency_show(struct seq_file *m, void *data)
4229 {
4230 	struct drm_device *dev = m->private;
4231 	struct drm_i915_private *dev_priv = dev->dev_private;
4232 	const uint16_t *latencies;
4233 
4234 	if (INTEL_INFO(dev)->gen >= 9)
4235 		latencies = dev_priv->wm.skl_latency;
4236 	else
4237 		latencies = to_i915(dev)->wm.spr_latency;
4238 
4239 	wm_latency_show(m, latencies);
4240 
4241 	return 0;
4242 }
4243 
4244 static int cur_wm_latency_show(struct seq_file *m, void *data)
4245 {
4246 	struct drm_device *dev = m->private;
4247 	struct drm_i915_private *dev_priv = dev->dev_private;
4248 	const uint16_t *latencies;
4249 
4250 	if (INTEL_INFO(dev)->gen >= 9)
4251 		latencies = dev_priv->wm.skl_latency;
4252 	else
4253 		latencies = to_i915(dev)->wm.cur_latency;
4254 
4255 	wm_latency_show(m, latencies);
4256 
4257 	return 0;
4258 }
4259 
4260 static int pri_wm_latency_open(struct inode *inode, struct file *file)
4261 {
4262 	struct drm_device *dev = inode->i_private;
4263 
4264 	if (INTEL_INFO(dev)->gen < 5)
4265 		return -ENODEV;
4266 
4267 	return single_open(file, pri_wm_latency_show, dev);
4268 }
4269 
4270 static int spr_wm_latency_open(struct inode *inode, struct file *file)
4271 {
4272 	struct drm_device *dev = inode->i_private;
4273 
4274 	if (HAS_GMCH_DISPLAY(dev))
4275 		return -ENODEV;
4276 
4277 	return single_open(file, spr_wm_latency_show, dev);
4278 }
4279 
4280 static int cur_wm_latency_open(struct inode *inode, struct file *file)
4281 {
4282 	struct drm_device *dev = inode->i_private;
4283 
4284 	if (HAS_GMCH_DISPLAY(dev))
4285 		return -ENODEV;
4286 
4287 	return single_open(file, cur_wm_latency_show, dev);
4288 }
4289 
4290 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
4291 				size_t len, loff_t *offp, uint16_t wm[8])
4292 {
4293 	struct seq_file *m = file->private_data;
4294 	struct drm_device *dev = m->private;
4295 	uint16_t new[8] = { 0 };
4296 	int num_levels;
4297 	int level;
4298 	int ret;
4299 	char tmp[32];
4300 
4301 	if (IS_CHERRYVIEW(dev))
4302 		num_levels = 3;
4303 	else if (IS_VALLEYVIEW(dev))
4304 		num_levels = 1;
4305 	else
4306 		num_levels = ilk_wm_max_level(dev) + 1;
4307 
4308 	if (len >= sizeof(tmp))
4309 		return -EINVAL;
4310 
4311 	if (copy_from_user(tmp, ubuf, len))
4312 		return -EFAULT;
4313 
4314 	tmp[len] = '\0';
4315 
4316 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
4317 		     &new[0], &new[1], &new[2], &new[3],
4318 		     &new[4], &new[5], &new[6], &new[7]);
4319 	if (ret != num_levels)
4320 		return -EINVAL;
4321 
4322 	drm_modeset_lock_all(dev);
4323 
4324 	for (level = 0; level < num_levels; level++)
4325 		wm[level] = new[level];
4326 
4327 	drm_modeset_unlock_all(dev);
4328 
4329 	return len;
4330 }
4331 
4332 
4333 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4334 				    size_t len, loff_t *offp)
4335 {
4336 	struct seq_file *m = file->private_data;
4337 	struct drm_device *dev = m->private;
4338 	struct drm_i915_private *dev_priv = dev->dev_private;
4339 	uint16_t *latencies;
4340 
4341 	if (INTEL_INFO(dev)->gen >= 9)
4342 		latencies = dev_priv->wm.skl_latency;
4343 	else
4344 		latencies = to_i915(dev)->wm.pri_latency;
4345 
4346 	return wm_latency_write(file, ubuf, len, offp, latencies);
4347 }
4348 
4349 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4350 				    size_t len, loff_t *offp)
4351 {
4352 	struct seq_file *m = file->private_data;
4353 	struct drm_device *dev = m->private;
4354 	struct drm_i915_private *dev_priv = dev->dev_private;
4355 	uint16_t *latencies;
4356 
4357 	if (INTEL_INFO(dev)->gen >= 9)
4358 		latencies = dev_priv->wm.skl_latency;
4359 	else
4360 		latencies = to_i915(dev)->wm.spr_latency;
4361 
4362 	return wm_latency_write(file, ubuf, len, offp, latencies);
4363 }
4364 
4365 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4366 				    size_t len, loff_t *offp)
4367 {
4368 	struct seq_file *m = file->private_data;
4369 	struct drm_device *dev = m->private;
4370 	struct drm_i915_private *dev_priv = dev->dev_private;
4371 	uint16_t *latencies;
4372 
4373 	if (INTEL_INFO(dev)->gen >= 9)
4374 		latencies = dev_priv->wm.skl_latency;
4375 	else
4376 		latencies = to_i915(dev)->wm.cur_latency;
4377 
4378 	return wm_latency_write(file, ubuf, len, offp, latencies);
4379 }
4380 
4381 static const struct file_operations i915_pri_wm_latency_fops = {
4382 	.owner = THIS_MODULE,
4383 	.open = pri_wm_latency_open,
4384 	.read = seq_read,
4385 	.llseek = seq_lseek,
4386 	.release = single_release,
4387 	.write = pri_wm_latency_write
4388 };
4389 
4390 static const struct file_operations i915_spr_wm_latency_fops = {
4391 	.owner = THIS_MODULE,
4392 	.open = spr_wm_latency_open,
4393 	.read = seq_read,
4394 	.llseek = seq_lseek,
4395 	.release = single_release,
4396 	.write = spr_wm_latency_write
4397 };
4398 
4399 static const struct file_operations i915_cur_wm_latency_fops = {
4400 	.owner = THIS_MODULE,
4401 	.open = cur_wm_latency_open,
4402 	.read = seq_read,
4403 	.llseek = seq_lseek,
4404 	.release = single_release,
4405 	.write = cur_wm_latency_write
4406 };
4407 
4408 static int
4409 i915_wedged_get(void *data, u64 *val)
4410 {
4411 	struct drm_device *dev = data;
4412 	struct drm_i915_private *dev_priv = dev->dev_private;
4413 
4414 	*val = atomic_read(&dev_priv->gpu_error.reset_counter);
4415 
4416 	return 0;
4417 }
4418 
4419 static int
4420 i915_wedged_set(void *data, u64 val)
4421 {
4422 	struct drm_device *dev = data;
4423 	struct drm_i915_private *dev_priv = dev->dev_private;
4424 
4425 	/*
4426 	 * There is no safeguard against this debugfs entry colliding
4427 	 * with the hangcheck calling same i915_handle_error() in
4428 	 * parallel, causing an explosion. For now we assume that the
4429 	 * test harness is responsible enough not to inject gpu hangs
4430 	 * while it is writing to 'i915_wedged'
4431 	 */
4432 
4433 	if (i915_reset_in_progress(&dev_priv->gpu_error))
4434 		return -EAGAIN;
4435 
4436 	intel_runtime_pm_get(dev_priv);
4437 
4438 	i915_handle_error(dev, val,
4439 			  "Manually setting wedged to %llu", val);
4440 
4441 	intel_runtime_pm_put(dev_priv);
4442 
4443 	return 0;
4444 }
4445 
4446 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4447 			i915_wedged_get, i915_wedged_set,
4448 			"%llu\n");
4449 
4450 static int
4451 i915_ring_stop_get(void *data, u64 *val)
4452 {
4453 	struct drm_device *dev = data;
4454 	struct drm_i915_private *dev_priv = dev->dev_private;
4455 
4456 	*val = dev_priv->gpu_error.stop_rings;
4457 
4458 	return 0;
4459 }
4460 
4461 static int
4462 i915_ring_stop_set(void *data, u64 val)
4463 {
4464 	struct drm_device *dev = data;
4465 	struct drm_i915_private *dev_priv = dev->dev_private;
4466 	int ret;
4467 
4468 	DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
4469 
4470 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4471 	if (ret)
4472 		return ret;
4473 
4474 	dev_priv->gpu_error.stop_rings = val;
4475 	mutex_unlock(&dev->struct_mutex);
4476 
4477 	return 0;
4478 }
4479 
4480 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
4481 			i915_ring_stop_get, i915_ring_stop_set,
4482 			"0x%08llx\n");
4483 
4484 static int
4485 i915_ring_missed_irq_get(void *data, u64 *val)
4486 {
4487 	struct drm_device *dev = data;
4488 	struct drm_i915_private *dev_priv = dev->dev_private;
4489 
4490 	*val = dev_priv->gpu_error.missed_irq_rings;
4491 	return 0;
4492 }
4493 
4494 static int
4495 i915_ring_missed_irq_set(void *data, u64 val)
4496 {
4497 	struct drm_device *dev = data;
4498 	struct drm_i915_private *dev_priv = dev->dev_private;
4499 	int ret;
4500 
4501 	/* Lock against concurrent debugfs callers */
4502 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4503 	if (ret)
4504 		return ret;
4505 	dev_priv->gpu_error.missed_irq_rings = val;
4506 	mutex_unlock(&dev->struct_mutex);
4507 
4508 	return 0;
4509 }
4510 
4511 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4512 			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4513 			"0x%08llx\n");
4514 
4515 static int
4516 i915_ring_test_irq_get(void *data, u64 *val)
4517 {
4518 	struct drm_device *dev = data;
4519 	struct drm_i915_private *dev_priv = dev->dev_private;
4520 
4521 	*val = dev_priv->gpu_error.test_irq_rings;
4522 
4523 	return 0;
4524 }
4525 
4526 static int
4527 i915_ring_test_irq_set(void *data, u64 val)
4528 {
4529 	struct drm_device *dev = data;
4530 	struct drm_i915_private *dev_priv = dev->dev_private;
4531 	int ret;
4532 
4533 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4534 
4535 	/* Lock against concurrent debugfs callers */
4536 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4537 	if (ret)
4538 		return ret;
4539 
4540 	dev_priv->gpu_error.test_irq_rings = val;
4541 	mutex_unlock(&dev->struct_mutex);
4542 
4543 	return 0;
4544 }
4545 
4546 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4547 			i915_ring_test_irq_get, i915_ring_test_irq_set,
4548 			"0x%08llx\n");
4549 
4550 #define DROP_UNBOUND 0x1
4551 #define DROP_BOUND 0x2
4552 #define DROP_RETIRE 0x4
4553 #define DROP_ACTIVE 0x8
4554 #define DROP_ALL (DROP_UNBOUND | \
4555 		  DROP_BOUND | \
4556 		  DROP_RETIRE | \
4557 		  DROP_ACTIVE)
4558 static int
4559 i915_drop_caches_get(void *data, u64 *val)
4560 {
4561 	*val = DROP_ALL;
4562 
4563 	return 0;
4564 }
4565 
4566 static int
4567 i915_drop_caches_set(void *data, u64 val)
4568 {
4569 	struct drm_device *dev = data;
4570 	struct drm_i915_private *dev_priv = dev->dev_private;
4571 	int ret;
4572 
4573 	DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
4574 
4575 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
4576 	 * on ioctls on -EAGAIN. */
4577 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4578 	if (ret)
4579 		return ret;
4580 
4581 	if (val & DROP_ACTIVE) {
4582 		ret = i915_gpu_idle(dev);
4583 		if (ret)
4584 			goto unlock;
4585 	}
4586 
4587 	if (val & (DROP_RETIRE | DROP_ACTIVE))
4588 		i915_gem_retire_requests(dev);
4589 
4590 	if (val & DROP_BOUND)
4591 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
4592 
4593 	if (val & DROP_UNBOUND)
4594 		i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
4595 
4596 unlock:
4597 	mutex_unlock(&dev->struct_mutex);
4598 
4599 	return ret;
4600 }
4601 
4602 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4603 			i915_drop_caches_get, i915_drop_caches_set,
4604 			"0x%08llx\n");
4605 
4606 static int
4607 i915_max_freq_get(void *data, u64 *val)
4608 {
4609 	struct drm_device *dev = data;
4610 	struct drm_i915_private *dev_priv = dev->dev_private;
4611 	int ret;
4612 
4613 	if (INTEL_INFO(dev)->gen < 6)
4614 		return -ENODEV;
4615 
4616 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4617 
4618 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4619 	if (ret)
4620 		return ret;
4621 
4622 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
4623 	mutex_unlock(&dev_priv->rps.hw_lock);
4624 
4625 	return 0;
4626 }
4627 
4628 static int
4629 i915_max_freq_set(void *data, u64 val)
4630 {
4631 	struct drm_device *dev = data;
4632 	struct drm_i915_private *dev_priv = dev->dev_private;
4633 	u32 hw_max, hw_min;
4634 	int ret;
4635 
4636 	if (INTEL_INFO(dev)->gen < 6)
4637 		return -ENODEV;
4638 
4639 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4640 
4641 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
4642 
4643 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4644 	if (ret)
4645 		return ret;
4646 
4647 	/*
4648 	 * Turbo will still be enabled, but won't go above the set value.
4649 	 */
4650 	val = intel_freq_opcode(dev_priv, val);
4651 
4652 	hw_max = dev_priv->rps.max_freq;
4653 	hw_min = dev_priv->rps.min_freq;
4654 
4655 	if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) {
4656 		mutex_unlock(&dev_priv->rps.hw_lock);
4657 		return -EINVAL;
4658 	}
4659 
4660 	dev_priv->rps.max_freq_softlimit = val;
4661 
4662 	intel_set_rps(dev, val);
4663 
4664 	mutex_unlock(&dev_priv->rps.hw_lock);
4665 
4666 	return 0;
4667 }
4668 
4669 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops,
4670 			i915_max_freq_get, i915_max_freq_set,
4671 			"%llu\n");
4672 
4673 static int
4674 i915_min_freq_get(void *data, u64 *val)
4675 {
4676 	struct drm_device *dev = data;
4677 	struct drm_i915_private *dev_priv = dev->dev_private;
4678 	int ret;
4679 
4680 	if (INTEL_INFO(dev)->gen < 6)
4681 		return -ENODEV;
4682 
4683 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4684 
4685 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4686 	if (ret)
4687 		return ret;
4688 
4689 	*val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
4690 	mutex_unlock(&dev_priv->rps.hw_lock);
4691 
4692 	return 0;
4693 }
4694 
4695 static int
4696 i915_min_freq_set(void *data, u64 val)
4697 {
4698 	struct drm_device *dev = data;
4699 	struct drm_i915_private *dev_priv = dev->dev_private;
4700 	u32 hw_max, hw_min;
4701 	int ret;
4702 
4703 	if (INTEL_INFO(dev)->gen < 6)
4704 		return -ENODEV;
4705 
4706 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4707 
4708 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
4709 
4710 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
4711 	if (ret)
4712 		return ret;
4713 
4714 	/*
4715 	 * Turbo will still be enabled, but won't go below the set value.
4716 	 */
4717 	val = intel_freq_opcode(dev_priv, val);
4718 
4719 	hw_max = dev_priv->rps.max_freq;
4720 	hw_min = dev_priv->rps.min_freq;
4721 
4722 	if (val < hw_min || val > hw_max || val > dev_priv->rps.max_freq_softlimit) {
4723 		mutex_unlock(&dev_priv->rps.hw_lock);
4724 		return -EINVAL;
4725 	}
4726 
4727 	dev_priv->rps.min_freq_softlimit = val;
4728 
4729 	intel_set_rps(dev, val);
4730 
4731 	mutex_unlock(&dev_priv->rps.hw_lock);
4732 
4733 	return 0;
4734 }
4735 
4736 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops,
4737 			i915_min_freq_get, i915_min_freq_set,
4738 			"%llu\n");
4739 
4740 static int
4741 i915_cache_sharing_get(void *data, u64 *val)
4742 {
4743 	struct drm_device *dev = data;
4744 	struct drm_i915_private *dev_priv = dev->dev_private;
4745 	u32 snpcr;
4746 	int ret;
4747 
4748 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
4749 		return -ENODEV;
4750 
4751 	ret = mutex_lock_interruptible(&dev->struct_mutex);
4752 	if (ret)
4753 		return ret;
4754 	intel_runtime_pm_get(dev_priv);
4755 
4756 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4757 
4758 	intel_runtime_pm_put(dev_priv);
4759 	mutex_unlock(&dev_priv->dev->struct_mutex);
4760 
4761 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4762 
4763 	return 0;
4764 }
4765 
4766 static int
4767 i915_cache_sharing_set(void *data, u64 val)
4768 {
4769 	struct drm_device *dev = data;
4770 	struct drm_i915_private *dev_priv = dev->dev_private;
4771 	u32 snpcr;
4772 
4773 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
4774 		return -ENODEV;
4775 
4776 	if (val > 3)
4777 		return -EINVAL;
4778 
4779 	intel_runtime_pm_get(dev_priv);
4780 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4781 
4782 	/* Update the cache sharing policy here as well */
4783 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4784 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
4785 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4786 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4787 
4788 	intel_runtime_pm_put(dev_priv);
4789 	return 0;
4790 }
4791 
4792 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4793 			i915_cache_sharing_get, i915_cache_sharing_set,
4794 			"%llu\n");
4795 
4796 struct sseu_dev_status {
4797 	unsigned int slice_total;
4798 	unsigned int subslice_total;
4799 	unsigned int subslice_per_slice;
4800 	unsigned int eu_total;
4801 	unsigned int eu_per_subslice;
4802 };
4803 
4804 static void cherryview_sseu_device_status(struct drm_device *dev,
4805 					  struct sseu_dev_status *stat)
4806 {
4807 	struct drm_i915_private *dev_priv = dev->dev_private;
4808 	const int ss_max = 2;
4809 	int ss;
4810 	u32 sig1[ss_max], sig2[ss_max];
4811 
4812 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4813 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4814 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4815 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4816 
4817 	for (ss = 0; ss < ss_max; ss++) {
4818 		unsigned int eu_cnt;
4819 
4820 		if (sig1[ss] & CHV_SS_PG_ENABLE)
4821 			/* skip disabled subslice */
4822 			continue;
4823 
4824 		stat->slice_total = 1;
4825 		stat->subslice_per_slice++;
4826 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4827 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4828 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4829 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4830 		stat->eu_total += eu_cnt;
4831 		stat->eu_per_subslice = max(stat->eu_per_subslice, eu_cnt);
4832 	}
4833 	stat->subslice_total = stat->subslice_per_slice;
4834 }
4835 
4836 static void gen9_sseu_device_status(struct drm_device *dev,
4837 				    struct sseu_dev_status *stat)
4838 {
4839 	struct drm_i915_private *dev_priv = dev->dev_private;
4840 	int s_max = 3, ss_max = 4;
4841 	int s, ss;
4842 	u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
4843 
4844 	/* BXT has a single slice and at most 3 subslices. */
4845 	if (IS_BROXTON(dev)) {
4846 		s_max = 1;
4847 		ss_max = 3;
4848 	}
4849 
4850 	for (s = 0; s < s_max; s++) {
4851 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4852 		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4853 		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4854 	}
4855 
4856 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4857 		     GEN9_PGCTL_SSA_EU19_ACK |
4858 		     GEN9_PGCTL_SSA_EU210_ACK |
4859 		     GEN9_PGCTL_SSA_EU311_ACK;
4860 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4861 		     GEN9_PGCTL_SSB_EU19_ACK |
4862 		     GEN9_PGCTL_SSB_EU210_ACK |
4863 		     GEN9_PGCTL_SSB_EU311_ACK;
4864 
4865 	for (s = 0; s < s_max; s++) {
4866 		unsigned int ss_cnt = 0;
4867 
4868 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4869 			/* skip disabled slice */
4870 			continue;
4871 
4872 		stat->slice_total++;
4873 
4874 		if (IS_SKYLAKE(dev))
4875 			ss_cnt = INTEL_INFO(dev)->subslice_per_slice;
4876 
4877 		for (ss = 0; ss < ss_max; ss++) {
4878 			unsigned int eu_cnt;
4879 
4880 			if (IS_BROXTON(dev) &&
4881 			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4882 				/* skip disabled subslice */
4883 				continue;
4884 
4885 			if (IS_BROXTON(dev))
4886 				ss_cnt++;
4887 
4888 			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4889 					       eu_mask[ss%2]);
4890 			stat->eu_total += eu_cnt;
4891 			stat->eu_per_subslice = max(stat->eu_per_subslice,
4892 						    eu_cnt);
4893 		}
4894 
4895 		stat->subslice_total += ss_cnt;
4896 		stat->subslice_per_slice = max(stat->subslice_per_slice,
4897 					       ss_cnt);
4898 	}
4899 }
4900 
4901 static int i915_sseu_status(struct seq_file *m, void *unused)
4902 {
4903 	struct drm_info_node *node = (struct drm_info_node *) m->private;
4904 	struct drm_device *dev = node->minor->dev;
4905 	struct sseu_dev_status stat;
4906 
4907 	if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
4908 		return -ENODEV;
4909 
4910 	seq_puts(m, "SSEU Device Info\n");
4911 	seq_printf(m, "  Available Slice Total: %u\n",
4912 		   INTEL_INFO(dev)->slice_total);
4913 	seq_printf(m, "  Available Subslice Total: %u\n",
4914 		   INTEL_INFO(dev)->subslice_total);
4915 	seq_printf(m, "  Available Subslice Per Slice: %u\n",
4916 		   INTEL_INFO(dev)->subslice_per_slice);
4917 	seq_printf(m, "  Available EU Total: %u\n",
4918 		   INTEL_INFO(dev)->eu_total);
4919 	seq_printf(m, "  Available EU Per Subslice: %u\n",
4920 		   INTEL_INFO(dev)->eu_per_subslice);
4921 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4922 		   yesno(INTEL_INFO(dev)->has_slice_pg));
4923 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4924 		   yesno(INTEL_INFO(dev)->has_subslice_pg));
4925 	seq_printf(m, "  Has EU Power Gating: %s\n",
4926 		   yesno(INTEL_INFO(dev)->has_eu_pg));
4927 
4928 	seq_puts(m, "SSEU Device Status\n");
4929 	memset(&stat, 0, sizeof(stat));
4930 	if (IS_CHERRYVIEW(dev)) {
4931 		cherryview_sseu_device_status(dev, &stat);
4932 	} else if (INTEL_INFO(dev)->gen >= 9) {
4933 		gen9_sseu_device_status(dev, &stat);
4934 	}
4935 	seq_printf(m, "  Enabled Slice Total: %u\n",
4936 		   stat.slice_total);
4937 	seq_printf(m, "  Enabled Subslice Total: %u\n",
4938 		   stat.subslice_total);
4939 	seq_printf(m, "  Enabled Subslice Per Slice: %u\n",
4940 		   stat.subslice_per_slice);
4941 	seq_printf(m, "  Enabled EU Total: %u\n",
4942 		   stat.eu_total);
4943 	seq_printf(m, "  Enabled EU Per Subslice: %u\n",
4944 		   stat.eu_per_subslice);
4945 
4946 	return 0;
4947 }
4948 
4949 static int i915_forcewake_open(struct inode *inode, struct file *file)
4950 {
4951 	struct drm_device *dev = inode->i_private;
4952 	struct drm_i915_private *dev_priv = dev->dev_private;
4953 
4954 	if (INTEL_INFO(dev)->gen < 6)
4955 		return 0;
4956 
4957 	intel_runtime_pm_get(dev_priv);
4958 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4959 
4960 	return 0;
4961 }
4962 
4963 static int i915_forcewake_release(struct inode *inode, struct file *file)
4964 {
4965 	struct drm_device *dev = inode->i_private;
4966 	struct drm_i915_private *dev_priv = dev->dev_private;
4967 
4968 	if (INTEL_INFO(dev)->gen < 6)
4969 		return 0;
4970 
4971 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4972 	intel_runtime_pm_put(dev_priv);
4973 
4974 	return 0;
4975 }
4976 
4977 static const struct file_operations i915_forcewake_fops = {
4978 	.owner = THIS_MODULE,
4979 	.open = i915_forcewake_open,
4980 	.release = i915_forcewake_release,
4981 };
4982 
4983 static int i915_forcewake_create(struct dentry *root, struct drm_minor *minor)
4984 {
4985 	struct drm_device *dev = minor->dev;
4986 	struct dentry *ent;
4987 
4988 	ent = debugfs_create_file("i915_forcewake_user",
4989 				  S_IRUSR,
4990 				  root, dev,
4991 				  &i915_forcewake_fops);
4992 	if (!ent)
4993 		return -ENOMEM;
4994 
4995 	return drm_add_fake_info_node(minor, ent, &i915_forcewake_fops);
4996 }
4997 
4998 static int i915_debugfs_create(struct dentry *root,
4999 			       struct drm_minor *minor,
5000 			       const char *name,
5001 			       const struct file_operations *fops)
5002 {
5003 	struct drm_device *dev = minor->dev;
5004 	struct dentry *ent;
5005 
5006 	ent = debugfs_create_file(name,
5007 				  S_IRUGO | S_IWUSR,
5008 				  root, dev,
5009 				  fops);
5010 	if (!ent)
5011 		return -ENOMEM;
5012 
5013 	return drm_add_fake_info_node(minor, ent, fops);
5014 }
5015 
5016 static const struct drm_info_list i915_debugfs_list[] = {
5017 	{"i915_capabilities", i915_capabilities, 0},
5018 	{"i915_gem_objects", i915_gem_object_info, 0},
5019 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
5020 	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
5021 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
5022 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
5023 	{"i915_gem_stolen", i915_gem_stolen_list_info },
5024 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
5025 	{"i915_gem_request", i915_gem_request_info, 0},
5026 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
5027 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
5028 	{"i915_gem_interrupt", i915_interrupt_info, 0},
5029 	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
5030 	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
5031 	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
5032 	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
5033 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
5034 	{"i915_frequency_info", i915_frequency_info, 0},
5035 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
5036 	{"i915_drpc_info", i915_drpc_info, 0},
5037 	{"i915_emon_status", i915_emon_status, 0},
5038 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
5039 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
5040 	{"i915_fbc_status", i915_fbc_status, 0},
5041 	{"i915_ips_status", i915_ips_status, 0},
5042 	{"i915_sr_status", i915_sr_status, 0},
5043 	{"i915_opregion", i915_opregion, 0},
5044 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
5045 	{"i915_context_status", i915_context_status, 0},
5046 	{"i915_dump_lrc", i915_dump_lrc, 0},
5047 	{"i915_execlists", i915_execlists, 0},
5048 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
5049 	{"i915_swizzle_info", i915_swizzle_info, 0},
5050 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
5051 	{"i915_llc", i915_llc, 0},
5052 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
5053 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
5054 	{"i915_energy_uJ", i915_energy_uJ, 0},
5055 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
5056 	{"i915_power_domain_info", i915_power_domain_info, 0},
5057 	{"i915_display_info", i915_display_info, 0},
5058 	{"i915_semaphore_status", i915_semaphore_status, 0},
5059 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
5060 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
5061 	{"i915_wa_registers", i915_wa_registers, 0},
5062 	{"i915_ddb_info", i915_ddb_info, 0},
5063 	{"i915_sseu_status", i915_sseu_status, 0},
5064 	{"i915_drrs_status", i915_drrs_status, 0},
5065 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
5066 };
5067 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
5068 
5069 static const struct i915_debugfs_files {
5070 	const char *name;
5071 	const struct file_operations *fops;
5072 } i915_debugfs_files[] = {
5073 	{"i915_wedged", &i915_wedged_fops},
5074 	{"i915_max_freq", &i915_max_freq_fops},
5075 	{"i915_min_freq", &i915_min_freq_fops},
5076 	{"i915_cache_sharing", &i915_cache_sharing_fops},
5077 	{"i915_ring_stop", &i915_ring_stop_fops},
5078 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
5079 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
5080 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
5081 	{"i915_error_state", &i915_error_state_fops},
5082 	{"i915_next_seqno", &i915_next_seqno_fops},
5083 	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
5084 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
5085 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
5086 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
5087 	{"i915_fbc_false_color", &i915_fbc_fc_fops},
5088 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
5089 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
5090 	{"i915_dp_test_active", &i915_displayport_test_active_fops}
5091 };
5092 
5093 void intel_display_crc_init(struct drm_device *dev)
5094 {
5095 	struct drm_i915_private *dev_priv = dev->dev_private;
5096 	enum pipe pipe;
5097 
5098 	for_each_pipe(dev_priv, pipe) {
5099 		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
5100 
5101 		pipe_crc->opened = false;
5102 		spin_lock_init(&pipe_crc->lock);
5103 		init_waitqueue_head(&pipe_crc->wq);
5104 	}
5105 }
5106 
5107 int i915_debugfs_init(struct drm_minor *minor)
5108 {
5109 	int ret, i;
5110 
5111 	ret = i915_forcewake_create(minor->debugfs_root, minor);
5112 	if (ret)
5113 		return ret;
5114 
5115 	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
5116 		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
5117 		if (ret)
5118 			return ret;
5119 	}
5120 
5121 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
5122 		ret = i915_debugfs_create(minor->debugfs_root, minor,
5123 					  i915_debugfs_files[i].name,
5124 					  i915_debugfs_files[i].fops);
5125 		if (ret)
5126 			return ret;
5127 	}
5128 
5129 	return drm_debugfs_create_files(i915_debugfs_list,
5130 					I915_DEBUGFS_ENTRIES,
5131 					minor->debugfs_root, minor);
5132 }
5133 
5134 void i915_debugfs_cleanup(struct drm_minor *minor)
5135 {
5136 	int i;
5137 
5138 	drm_debugfs_remove_files(i915_debugfs_list,
5139 				 I915_DEBUGFS_ENTRIES, minor);
5140 
5141 	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
5142 				 1, minor);
5143 
5144 	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
5145 		struct drm_info_list *info_list =
5146 			(struct drm_info_list *)&i915_pipe_crc_data[i];
5147 
5148 		drm_debugfs_remove_files(info_list, 1, minor);
5149 	}
5150 
5151 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
5152 		struct drm_info_list *info_list =
5153 			(struct drm_info_list *) i915_debugfs_files[i].fops;
5154 
5155 		drm_debugfs_remove_files(info_list, 1, minor);
5156 	}
5157 }
5158 
5159 struct dpcd_block {
5160 	/* DPCD dump start address. */
5161 	unsigned int offset;
5162 	/* DPCD dump end address, inclusive. If unset, .size will be used. */
5163 	unsigned int end;
5164 	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
5165 	size_t size;
5166 	/* Only valid for eDP. */
5167 	bool edp;
5168 };
5169 
5170 static const struct dpcd_block i915_dpcd_debug[] = {
5171 	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
5172 	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
5173 	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
5174 	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
5175 	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
5176 	{ .offset = DP_SET_POWER },
5177 	{ .offset = DP_EDP_DPCD_REV },
5178 	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
5179 	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
5180 	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
5181 };
5182 
5183 static int i915_dpcd_show(struct seq_file *m, void *data)
5184 {
5185 	struct drm_connector *connector = m->private;
5186 	struct intel_dp *intel_dp =
5187 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
5188 	uint8_t buf[16];
5189 	ssize_t err;
5190 	int i;
5191 
5192 	if (connector->status != connector_status_connected)
5193 		return -ENODEV;
5194 
5195 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
5196 		const struct dpcd_block *b = &i915_dpcd_debug[i];
5197 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
5198 
5199 		if (b->edp &&
5200 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
5201 			continue;
5202 
5203 		/* low tech for now */
5204 		if (WARN_ON(size > sizeof(buf)))
5205 			continue;
5206 
5207 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
5208 		if (err <= 0) {
5209 			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
5210 				  size, b->offset, err);
5211 			continue;
5212 		}
5213 
5214 		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
5215 	}
5216 
5217 	return 0;
5218 }
5219 
5220 static int i915_dpcd_open(struct inode *inode, struct file *file)
5221 {
5222 	return single_open(file, i915_dpcd_show, inode->i_private);
5223 }
5224 
5225 static const struct file_operations i915_dpcd_fops = {
5226 	.owner = THIS_MODULE,
5227 	.open = i915_dpcd_open,
5228 	.read = seq_read,
5229 	.llseek = seq_lseek,
5230 	.release = single_release,
5231 };
5232 
5233 /**
5234  * i915_debugfs_connector_add - add i915 specific connector debugfs files
5235  * @connector: pointer to a registered drm_connector
5236  *
5237  * Cleanup will be done by drm_connector_unregister() through a call to
5238  * drm_debugfs_connector_remove().
5239  *
5240  * Returns 0 on success, negative error codes on error.
5241  */
5242 int i915_debugfs_connector_add(struct drm_connector *connector)
5243 {
5244 	struct dentry *root = connector->debugfs_entry;
5245 
5246 	/* The connector must have been registered beforehands. */
5247 	if (!root)
5248 		return -ENODEV;
5249 
5250 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5251 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5252 		debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
5253 				    &i915_dpcd_fops);
5254 
5255 	return 0;
5256 }
5257