1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28 
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34 
35 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
36 {
37 	return to_i915(node->minor->dev);
38 }
39 
40 static int i915_capabilities(struct seq_file *m, void *data)
41 {
42 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
43 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
44 	struct drm_printer p = drm_seq_file_printer(m);
45 
46 	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
47 	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
48 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
49 
50 	intel_device_info_dump_flags(info, &p);
51 	intel_device_info_dump_runtime(info, &p);
52 	intel_driver_caps_print(&dev_priv->caps, &p);
53 
54 	kernel_param_lock(THIS_MODULE);
55 	i915_params_dump(&i915_modparams, &p);
56 	kernel_param_unlock(THIS_MODULE);
57 
58 	return 0;
59 }
60 
61 static char get_active_flag(struct drm_i915_gem_object *obj)
62 {
63 	return i915_gem_object_is_active(obj) ? '*' : ' ';
64 }
65 
66 static char get_pin_flag(struct drm_i915_gem_object *obj)
67 {
68 	return obj->pin_global ? 'p' : ' ';
69 }
70 
71 static char get_tiling_flag(struct drm_i915_gem_object *obj)
72 {
73 	switch (i915_gem_object_get_tiling(obj)) {
74 	default:
75 	case I915_TILING_NONE: return ' ';
76 	case I915_TILING_X: return 'X';
77 	case I915_TILING_Y: return 'Y';
78 	}
79 }
80 
81 static char get_global_flag(struct drm_i915_gem_object *obj)
82 {
83 	return obj->userfault_count ? 'g' : ' ';
84 }
85 
86 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
87 {
88 	return obj->mm.mapping ? 'M' : ' ';
89 }
90 
91 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92 {
93 	u64 size = 0;
94 	struct i915_vma *vma;
95 
96 	for_each_ggtt_vma(vma, obj) {
97 		if (drm_mm_node_allocated(&vma->node))
98 			size += vma->node.size;
99 	}
100 
101 	return size;
102 }
103 
104 static const char *
105 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106 {
107 	size_t x = 0;
108 
109 	switch (page_sizes) {
110 	case 0:
111 		return "";
112 	case I915_GTT_PAGE_SIZE_4K:
113 		return "4K";
114 	case I915_GTT_PAGE_SIZE_64K:
115 		return "64K";
116 	case I915_GTT_PAGE_SIZE_2M:
117 		return "2M";
118 	default:
119 		if (!buf)
120 			return "M";
121 
122 		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123 			x += snprintf(buf + x, len - x, "2M, ");
124 		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125 			x += snprintf(buf + x, len - x, "64K, ");
126 		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127 			x += snprintf(buf + x, len - x, "4K, ");
128 		buf[x-2] = '\0';
129 
130 		return buf;
131 	}
132 }
133 
134 static void
135 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
136 {
137 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
138 	struct intel_engine_cs *engine;
139 	struct i915_vma *vma;
140 	unsigned int frontbuffer_bits;
141 	int pin_count = 0;
142 
143 	lockdep_assert_held(&obj->base.dev->struct_mutex);
144 
145 	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
146 		   &obj->base,
147 		   get_active_flag(obj),
148 		   get_pin_flag(obj),
149 		   get_tiling_flag(obj),
150 		   get_global_flag(obj),
151 		   get_pin_mapped_flag(obj),
152 		   obj->base.size / 1024,
153 		   obj->read_domains,
154 		   obj->write_domain,
155 		   i915_cache_level_str(dev_priv, obj->cache_level),
156 		   obj->mm.dirty ? " dirty" : "",
157 		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
158 	if (obj->base.name)
159 		seq_printf(m, " (name: %d)", obj->base.name);
160 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
161 		if (i915_vma_is_pinned(vma))
162 			pin_count++;
163 	}
164 	seq_printf(m, " (pinned x %d)", pin_count);
165 	if (obj->pin_global)
166 		seq_printf(m, " (global)");
167 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
168 		if (!drm_mm_node_allocated(&vma->node))
169 			continue;
170 
171 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
172 			   i915_vma_is_ggtt(vma) ? "g" : "pp",
173 			   vma->node.start, vma->node.size,
174 			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
175 		if (i915_vma_is_ggtt(vma)) {
176 			switch (vma->ggtt_view.type) {
177 			case I915_GGTT_VIEW_NORMAL:
178 				seq_puts(m, ", normal");
179 				break;
180 
181 			case I915_GGTT_VIEW_PARTIAL:
182 				seq_printf(m, ", partial [%08llx+%x]",
183 					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
184 					   vma->ggtt_view.partial.size << PAGE_SHIFT);
185 				break;
186 
187 			case I915_GGTT_VIEW_ROTATED:
188 				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
189 					   vma->ggtt_view.rotated.plane[0].width,
190 					   vma->ggtt_view.rotated.plane[0].height,
191 					   vma->ggtt_view.rotated.plane[0].stride,
192 					   vma->ggtt_view.rotated.plane[0].offset,
193 					   vma->ggtt_view.rotated.plane[1].width,
194 					   vma->ggtt_view.rotated.plane[1].height,
195 					   vma->ggtt_view.rotated.plane[1].stride,
196 					   vma->ggtt_view.rotated.plane[1].offset);
197 				break;
198 
199 			default:
200 				MISSING_CASE(vma->ggtt_view.type);
201 				break;
202 			}
203 		}
204 		if (vma->fence)
205 			seq_printf(m, " , fence: %d%s",
206 				   vma->fence->id,
207 				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
208 		seq_puts(m, ")");
209 	}
210 	if (obj->stolen)
211 		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
212 
213 	engine = i915_gem_object_last_write_engine(obj);
214 	if (engine)
215 		seq_printf(m, " (%s)", engine->name);
216 
217 	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
218 	if (frontbuffer_bits)
219 		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
220 }
221 
222 static int obj_rank_by_stolen(const void *A, const void *B)
223 {
224 	const struct drm_i915_gem_object *a =
225 		*(const struct drm_i915_gem_object **)A;
226 	const struct drm_i915_gem_object *b =
227 		*(const struct drm_i915_gem_object **)B;
228 
229 	if (a->stolen->start < b->stolen->start)
230 		return -1;
231 	if (a->stolen->start > b->stolen->start)
232 		return 1;
233 	return 0;
234 }
235 
236 static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
237 {
238 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
239 	struct drm_device *dev = &dev_priv->drm;
240 	struct drm_i915_gem_object **objects;
241 	struct drm_i915_gem_object *obj;
242 	u64 total_obj_size, total_gtt_size;
243 	unsigned long total, count, n;
244 	int ret;
245 
246 	total = READ_ONCE(dev_priv->mm.object_count);
247 	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
248 	if (!objects)
249 		return -ENOMEM;
250 
251 	ret = mutex_lock_interruptible(&dev->struct_mutex);
252 	if (ret)
253 		goto out;
254 
255 	total_obj_size = total_gtt_size = count = 0;
256 
257 	spin_lock(&dev_priv->mm.obj_lock);
258 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
259 		if (count == total)
260 			break;
261 
262 		if (obj->stolen == NULL)
263 			continue;
264 
265 		objects[count++] = obj;
266 		total_obj_size += obj->base.size;
267 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
268 
269 	}
270 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
271 		if (count == total)
272 			break;
273 
274 		if (obj->stolen == NULL)
275 			continue;
276 
277 		objects[count++] = obj;
278 		total_obj_size += obj->base.size;
279 	}
280 	spin_unlock(&dev_priv->mm.obj_lock);
281 
282 	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
283 
284 	seq_puts(m, "Stolen:\n");
285 	for (n = 0; n < count; n++) {
286 		seq_puts(m, "   ");
287 		describe_obj(m, objects[n]);
288 		seq_putc(m, '\n');
289 	}
290 	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
291 		   count, total_obj_size, total_gtt_size);
292 
293 	mutex_unlock(&dev->struct_mutex);
294 out:
295 	kvfree(objects);
296 	return ret;
297 }
298 
299 struct file_stats {
300 	struct drm_i915_file_private *file_priv;
301 	unsigned long count;
302 	u64 total, unbound;
303 	u64 global, shared;
304 	u64 active, inactive;
305 };
306 
307 static int per_file_stats(int id, void *ptr, void *data)
308 {
309 	struct drm_i915_gem_object *obj = ptr;
310 	struct file_stats *stats = data;
311 	struct i915_vma *vma;
312 
313 	lockdep_assert_held(&obj->base.dev->struct_mutex);
314 
315 	stats->count++;
316 	stats->total += obj->base.size;
317 	if (!obj->bind_count)
318 		stats->unbound += obj->base.size;
319 	if (obj->base.name || obj->base.dma_buf)
320 		stats->shared += obj->base.size;
321 
322 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
323 		if (!drm_mm_node_allocated(&vma->node))
324 			continue;
325 
326 		if (i915_vma_is_ggtt(vma)) {
327 			stats->global += vma->node.size;
328 		} else {
329 			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
330 
331 			if (ppgtt->vm.file != stats->file_priv)
332 				continue;
333 		}
334 
335 		if (i915_vma_is_active(vma))
336 			stats->active += vma->node.size;
337 		else
338 			stats->inactive += vma->node.size;
339 	}
340 
341 	return 0;
342 }
343 
344 #define print_file_stats(m, name, stats) do { \
345 	if (stats.count) \
346 		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
347 			   name, \
348 			   stats.count, \
349 			   stats.total, \
350 			   stats.active, \
351 			   stats.inactive, \
352 			   stats.global, \
353 			   stats.shared, \
354 			   stats.unbound); \
355 } while (0)
356 
357 static void print_batch_pool_stats(struct seq_file *m,
358 				   struct drm_i915_private *dev_priv)
359 {
360 	struct drm_i915_gem_object *obj;
361 	struct file_stats stats;
362 	struct intel_engine_cs *engine;
363 	enum intel_engine_id id;
364 	int j;
365 
366 	memset(&stats, 0, sizeof(stats));
367 
368 	for_each_engine(engine, dev_priv, id) {
369 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
370 			list_for_each_entry(obj,
371 					    &engine->batch_pool.cache_list[j],
372 					    batch_pool_link)
373 				per_file_stats(0, obj, &stats);
374 		}
375 	}
376 
377 	print_file_stats(m, "[k]batch pool", stats);
378 }
379 
380 static int per_file_ctx_stats(int idx, void *ptr, void *data)
381 {
382 	struct i915_gem_context *ctx = ptr;
383 	struct intel_engine_cs *engine;
384 	enum intel_engine_id id;
385 
386 	for_each_engine(engine, ctx->i915, id) {
387 		struct intel_context *ce = to_intel_context(ctx, engine);
388 
389 		if (ce->state)
390 			per_file_stats(0, ce->state->obj, data);
391 		if (ce->ring)
392 			per_file_stats(0, ce->ring->vma->obj, data);
393 	}
394 
395 	return 0;
396 }
397 
398 static void print_context_stats(struct seq_file *m,
399 				struct drm_i915_private *dev_priv)
400 {
401 	struct drm_device *dev = &dev_priv->drm;
402 	struct file_stats stats;
403 	struct drm_file *file;
404 
405 	memset(&stats, 0, sizeof(stats));
406 
407 	mutex_lock(&dev->struct_mutex);
408 	if (dev_priv->kernel_context)
409 		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
410 
411 	list_for_each_entry(file, &dev->filelist, lhead) {
412 		struct drm_i915_file_private *fpriv = file->driver_priv;
413 		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
414 	}
415 	mutex_unlock(&dev->struct_mutex);
416 
417 	print_file_stats(m, "[k]contexts", stats);
418 }
419 
420 static int i915_gem_object_info(struct seq_file *m, void *data)
421 {
422 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
423 	struct drm_device *dev = &dev_priv->drm;
424 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
425 	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
426 	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
427 	struct drm_i915_gem_object *obj;
428 	unsigned int page_sizes = 0;
429 	struct drm_file *file;
430 	char buf[80];
431 	int ret;
432 
433 	ret = mutex_lock_interruptible(&dev->struct_mutex);
434 	if (ret)
435 		return ret;
436 
437 	seq_printf(m, "%u objects, %llu bytes\n",
438 		   dev_priv->mm.object_count,
439 		   dev_priv->mm.object_memory);
440 
441 	size = count = 0;
442 	mapped_size = mapped_count = 0;
443 	purgeable_size = purgeable_count = 0;
444 	huge_size = huge_count = 0;
445 
446 	spin_lock(&dev_priv->mm.obj_lock);
447 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
448 		size += obj->base.size;
449 		++count;
450 
451 		if (obj->mm.madv == I915_MADV_DONTNEED) {
452 			purgeable_size += obj->base.size;
453 			++purgeable_count;
454 		}
455 
456 		if (obj->mm.mapping) {
457 			mapped_count++;
458 			mapped_size += obj->base.size;
459 		}
460 
461 		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
462 			huge_count++;
463 			huge_size += obj->base.size;
464 			page_sizes |= obj->mm.page_sizes.sg;
465 		}
466 	}
467 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
468 
469 	size = count = dpy_size = dpy_count = 0;
470 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
471 		size += obj->base.size;
472 		++count;
473 
474 		if (obj->pin_global) {
475 			dpy_size += obj->base.size;
476 			++dpy_count;
477 		}
478 
479 		if (obj->mm.madv == I915_MADV_DONTNEED) {
480 			purgeable_size += obj->base.size;
481 			++purgeable_count;
482 		}
483 
484 		if (obj->mm.mapping) {
485 			mapped_count++;
486 			mapped_size += obj->base.size;
487 		}
488 
489 		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
490 			huge_count++;
491 			huge_size += obj->base.size;
492 			page_sizes |= obj->mm.page_sizes.sg;
493 		}
494 	}
495 	spin_unlock(&dev_priv->mm.obj_lock);
496 
497 	seq_printf(m, "%u bound objects, %llu bytes\n",
498 		   count, size);
499 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
500 		   purgeable_count, purgeable_size);
501 	seq_printf(m, "%u mapped objects, %llu bytes\n",
502 		   mapped_count, mapped_size);
503 	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
504 		   huge_count,
505 		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
506 		   huge_size);
507 	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
508 		   dpy_count, dpy_size);
509 
510 	seq_printf(m, "%llu [%pa] gtt total\n",
511 		   ggtt->vm.total, &ggtt->mappable_end);
512 	seq_printf(m, "Supported page sizes: %s\n",
513 		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
514 					buf, sizeof(buf)));
515 
516 	seq_putc(m, '\n');
517 	print_batch_pool_stats(m, dev_priv);
518 	mutex_unlock(&dev->struct_mutex);
519 
520 	mutex_lock(&dev->filelist_mutex);
521 	print_context_stats(m, dev_priv);
522 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
523 		struct file_stats stats;
524 		struct drm_i915_file_private *file_priv = file->driver_priv;
525 		struct i915_request *request;
526 		struct task_struct *task;
527 
528 		mutex_lock(&dev->struct_mutex);
529 
530 		memset(&stats, 0, sizeof(stats));
531 		stats.file_priv = file->driver_priv;
532 		spin_lock(&file->table_lock);
533 		idr_for_each(&file->object_idr, per_file_stats, &stats);
534 		spin_unlock(&file->table_lock);
535 		/*
536 		 * Although we have a valid reference on file->pid, that does
537 		 * not guarantee that the task_struct who called get_pid() is
538 		 * still alive (e.g. get_pid(current) => fork() => exit()).
539 		 * Therefore, we need to protect this ->comm access using RCU.
540 		 */
541 		request = list_first_entry_or_null(&file_priv->mm.request_list,
542 						   struct i915_request,
543 						   client_link);
544 		rcu_read_lock();
545 		task = pid_task(request && request->gem_context->pid ?
546 				request->gem_context->pid : file->pid,
547 				PIDTYPE_PID);
548 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
549 		rcu_read_unlock();
550 
551 		mutex_unlock(&dev->struct_mutex);
552 	}
553 	mutex_unlock(&dev->filelist_mutex);
554 
555 	return 0;
556 }
557 
558 static int i915_gem_gtt_info(struct seq_file *m, void *data)
559 {
560 	struct drm_info_node *node = m->private;
561 	struct drm_i915_private *dev_priv = node_to_i915(node);
562 	struct drm_device *dev = &dev_priv->drm;
563 	struct drm_i915_gem_object **objects;
564 	struct drm_i915_gem_object *obj;
565 	u64 total_obj_size, total_gtt_size;
566 	unsigned long nobject, n;
567 	int count, ret;
568 
569 	nobject = READ_ONCE(dev_priv->mm.object_count);
570 	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571 	if (!objects)
572 		return -ENOMEM;
573 
574 	ret = mutex_lock_interruptible(&dev->struct_mutex);
575 	if (ret)
576 		return ret;
577 
578 	count = 0;
579 	spin_lock(&dev_priv->mm.obj_lock);
580 	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581 		objects[count++] = obj;
582 		if (count == nobject)
583 			break;
584 	}
585 	spin_unlock(&dev_priv->mm.obj_lock);
586 
587 	total_obj_size = total_gtt_size = 0;
588 	for (n = 0;  n < count; n++) {
589 		obj = objects[n];
590 
591 		seq_puts(m, "   ");
592 		describe_obj(m, obj);
593 		seq_putc(m, '\n');
594 		total_obj_size += obj->base.size;
595 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
596 	}
597 
598 	mutex_unlock(&dev->struct_mutex);
599 
600 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
601 		   count, total_obj_size, total_gtt_size);
602 	kvfree(objects);
603 
604 	return 0;
605 }
606 
607 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
608 {
609 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
610 	struct drm_device *dev = &dev_priv->drm;
611 	struct drm_i915_gem_object *obj;
612 	struct intel_engine_cs *engine;
613 	enum intel_engine_id id;
614 	int total = 0;
615 	int ret, j;
616 
617 	ret = mutex_lock_interruptible(&dev->struct_mutex);
618 	if (ret)
619 		return ret;
620 
621 	for_each_engine(engine, dev_priv, id) {
622 		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
623 			int count;
624 
625 			count = 0;
626 			list_for_each_entry(obj,
627 					    &engine->batch_pool.cache_list[j],
628 					    batch_pool_link)
629 				count++;
630 			seq_printf(m, "%s cache[%d]: %d objects\n",
631 				   engine->name, j, count);
632 
633 			list_for_each_entry(obj,
634 					    &engine->batch_pool.cache_list[j],
635 					    batch_pool_link) {
636 				seq_puts(m, "   ");
637 				describe_obj(m, obj);
638 				seq_putc(m, '\n');
639 			}
640 
641 			total += count;
642 		}
643 	}
644 
645 	seq_printf(m, "total: %d\n", total);
646 
647 	mutex_unlock(&dev->struct_mutex);
648 
649 	return 0;
650 }
651 
652 static void gen8_display_interrupt_info(struct seq_file *m)
653 {
654 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
655 	int pipe;
656 
657 	for_each_pipe(dev_priv, pipe) {
658 		enum intel_display_power_domain power_domain;
659 
660 		power_domain = POWER_DOMAIN_PIPE(pipe);
661 		if (!intel_display_power_get_if_enabled(dev_priv,
662 							power_domain)) {
663 			seq_printf(m, "Pipe %c power disabled\n",
664 				   pipe_name(pipe));
665 			continue;
666 		}
667 		seq_printf(m, "Pipe %c IMR:\t%08x\n",
668 			   pipe_name(pipe),
669 			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
670 		seq_printf(m, "Pipe %c IIR:\t%08x\n",
671 			   pipe_name(pipe),
672 			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
673 		seq_printf(m, "Pipe %c IER:\t%08x\n",
674 			   pipe_name(pipe),
675 			   I915_READ(GEN8_DE_PIPE_IER(pipe)));
676 
677 		intel_display_power_put(dev_priv, power_domain);
678 	}
679 
680 	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
681 		   I915_READ(GEN8_DE_PORT_IMR));
682 	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
683 		   I915_READ(GEN8_DE_PORT_IIR));
684 	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
685 		   I915_READ(GEN8_DE_PORT_IER));
686 
687 	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
688 		   I915_READ(GEN8_DE_MISC_IMR));
689 	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
690 		   I915_READ(GEN8_DE_MISC_IIR));
691 	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
692 		   I915_READ(GEN8_DE_MISC_IER));
693 
694 	seq_printf(m, "PCU interrupt mask:\t%08x\n",
695 		   I915_READ(GEN8_PCU_IMR));
696 	seq_printf(m, "PCU interrupt identity:\t%08x\n",
697 		   I915_READ(GEN8_PCU_IIR));
698 	seq_printf(m, "PCU interrupt enable:\t%08x\n",
699 		   I915_READ(GEN8_PCU_IER));
700 }
701 
702 static int i915_interrupt_info(struct seq_file *m, void *data)
703 {
704 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
705 	struct intel_engine_cs *engine;
706 	enum intel_engine_id id;
707 	int i, pipe;
708 
709 	intel_runtime_pm_get(dev_priv);
710 
711 	if (IS_CHERRYVIEW(dev_priv)) {
712 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
713 			   I915_READ(GEN8_MASTER_IRQ));
714 
715 		seq_printf(m, "Display IER:\t%08x\n",
716 			   I915_READ(VLV_IER));
717 		seq_printf(m, "Display IIR:\t%08x\n",
718 			   I915_READ(VLV_IIR));
719 		seq_printf(m, "Display IIR_RW:\t%08x\n",
720 			   I915_READ(VLV_IIR_RW));
721 		seq_printf(m, "Display IMR:\t%08x\n",
722 			   I915_READ(VLV_IMR));
723 		for_each_pipe(dev_priv, pipe) {
724 			enum intel_display_power_domain power_domain;
725 
726 			power_domain = POWER_DOMAIN_PIPE(pipe);
727 			if (!intel_display_power_get_if_enabled(dev_priv,
728 								power_domain)) {
729 				seq_printf(m, "Pipe %c power disabled\n",
730 					   pipe_name(pipe));
731 				continue;
732 			}
733 
734 			seq_printf(m, "Pipe %c stat:\t%08x\n",
735 				   pipe_name(pipe),
736 				   I915_READ(PIPESTAT(pipe)));
737 
738 			intel_display_power_put(dev_priv, power_domain);
739 		}
740 
741 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
742 		seq_printf(m, "Port hotplug:\t%08x\n",
743 			   I915_READ(PORT_HOTPLUG_EN));
744 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745 			   I915_READ(VLV_DPFLIPSTAT));
746 		seq_printf(m, "DPINVGTT:\t%08x\n",
747 			   I915_READ(DPINVGTT));
748 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
749 
750 		for (i = 0; i < 4; i++) {
751 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752 				   i, I915_READ(GEN8_GT_IMR(i)));
753 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754 				   i, I915_READ(GEN8_GT_IIR(i)));
755 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756 				   i, I915_READ(GEN8_GT_IER(i)));
757 		}
758 
759 		seq_printf(m, "PCU interrupt mask:\t%08x\n",
760 			   I915_READ(GEN8_PCU_IMR));
761 		seq_printf(m, "PCU interrupt identity:\t%08x\n",
762 			   I915_READ(GEN8_PCU_IIR));
763 		seq_printf(m, "PCU interrupt enable:\t%08x\n",
764 			   I915_READ(GEN8_PCU_IER));
765 	} else if (INTEL_GEN(dev_priv) >= 11) {
766 		seq_printf(m, "Master Interrupt Control:  %08x\n",
767 			   I915_READ(GEN11_GFX_MSTR_IRQ));
768 
769 		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
770 			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771 		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
772 			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773 		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
774 			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775 		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777 		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
778 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779 		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
780 			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781 
782 		seq_printf(m, "Display Interrupt Control:\t%08x\n",
783 			   I915_READ(GEN11_DISPLAY_INT_CTL));
784 
785 		gen8_display_interrupt_info(m);
786 	} else if (INTEL_GEN(dev_priv) >= 8) {
787 		seq_printf(m, "Master Interrupt Control:\t%08x\n",
788 			   I915_READ(GEN8_MASTER_IRQ));
789 
790 		for (i = 0; i < 4; i++) {
791 			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792 				   i, I915_READ(GEN8_GT_IMR(i)));
793 			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794 				   i, I915_READ(GEN8_GT_IIR(i)));
795 			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796 				   i, I915_READ(GEN8_GT_IER(i)));
797 		}
798 
799 		gen8_display_interrupt_info(m);
800 	} else if (IS_VALLEYVIEW(dev_priv)) {
801 		seq_printf(m, "Display IER:\t%08x\n",
802 			   I915_READ(VLV_IER));
803 		seq_printf(m, "Display IIR:\t%08x\n",
804 			   I915_READ(VLV_IIR));
805 		seq_printf(m, "Display IIR_RW:\t%08x\n",
806 			   I915_READ(VLV_IIR_RW));
807 		seq_printf(m, "Display IMR:\t%08x\n",
808 			   I915_READ(VLV_IMR));
809 		for_each_pipe(dev_priv, pipe) {
810 			enum intel_display_power_domain power_domain;
811 
812 			power_domain = POWER_DOMAIN_PIPE(pipe);
813 			if (!intel_display_power_get_if_enabled(dev_priv,
814 								power_domain)) {
815 				seq_printf(m, "Pipe %c power disabled\n",
816 					   pipe_name(pipe));
817 				continue;
818 			}
819 
820 			seq_printf(m, "Pipe %c stat:\t%08x\n",
821 				   pipe_name(pipe),
822 				   I915_READ(PIPESTAT(pipe)));
823 			intel_display_power_put(dev_priv, power_domain);
824 		}
825 
826 		seq_printf(m, "Master IER:\t%08x\n",
827 			   I915_READ(VLV_MASTER_IER));
828 
829 		seq_printf(m, "Render IER:\t%08x\n",
830 			   I915_READ(GTIER));
831 		seq_printf(m, "Render IIR:\t%08x\n",
832 			   I915_READ(GTIIR));
833 		seq_printf(m, "Render IMR:\t%08x\n",
834 			   I915_READ(GTIMR));
835 
836 		seq_printf(m, "PM IER:\t\t%08x\n",
837 			   I915_READ(GEN6_PMIER));
838 		seq_printf(m, "PM IIR:\t\t%08x\n",
839 			   I915_READ(GEN6_PMIIR));
840 		seq_printf(m, "PM IMR:\t\t%08x\n",
841 			   I915_READ(GEN6_PMIMR));
842 
843 		seq_printf(m, "Port hotplug:\t%08x\n",
844 			   I915_READ(PORT_HOTPLUG_EN));
845 		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846 			   I915_READ(VLV_DPFLIPSTAT));
847 		seq_printf(m, "DPINVGTT:\t%08x\n",
848 			   I915_READ(DPINVGTT));
849 
850 	} else if (!HAS_PCH_SPLIT(dev_priv)) {
851 		seq_printf(m, "Interrupt enable:    %08x\n",
852 			   I915_READ(IER));
853 		seq_printf(m, "Interrupt identity:  %08x\n",
854 			   I915_READ(IIR));
855 		seq_printf(m, "Interrupt mask:      %08x\n",
856 			   I915_READ(IMR));
857 		for_each_pipe(dev_priv, pipe)
858 			seq_printf(m, "Pipe %c stat:         %08x\n",
859 				   pipe_name(pipe),
860 				   I915_READ(PIPESTAT(pipe)));
861 	} else {
862 		seq_printf(m, "North Display Interrupt enable:		%08x\n",
863 			   I915_READ(DEIER));
864 		seq_printf(m, "North Display Interrupt identity:	%08x\n",
865 			   I915_READ(DEIIR));
866 		seq_printf(m, "North Display Interrupt mask:		%08x\n",
867 			   I915_READ(DEIMR));
868 		seq_printf(m, "South Display Interrupt enable:		%08x\n",
869 			   I915_READ(SDEIER));
870 		seq_printf(m, "South Display Interrupt identity:	%08x\n",
871 			   I915_READ(SDEIIR));
872 		seq_printf(m, "South Display Interrupt mask:		%08x\n",
873 			   I915_READ(SDEIMR));
874 		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
875 			   I915_READ(GTIER));
876 		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
877 			   I915_READ(GTIIR));
878 		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
879 			   I915_READ(GTIMR));
880 	}
881 
882 	if (INTEL_GEN(dev_priv) >= 11) {
883 		seq_printf(m, "RCS Intr Mask:\t %08x\n",
884 			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885 		seq_printf(m, "BCS Intr Mask:\t %08x\n",
886 			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887 		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888 			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889 		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890 			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891 		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892 			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893 		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894 			   I915_READ(GEN11_GUC_SG_INTR_MASK));
895 		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896 			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897 		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898 			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899 		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900 			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901 
902 	} else if (INTEL_GEN(dev_priv) >= 6) {
903 		for_each_engine(engine, dev_priv, id) {
904 			seq_printf(m,
905 				   "Graphics Interrupt mask (%s):	%08x\n",
906 				   engine->name, I915_READ_IMR(engine));
907 		}
908 	}
909 
910 	intel_runtime_pm_put(dev_priv);
911 
912 	return 0;
913 }
914 
915 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916 {
917 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
918 	struct drm_device *dev = &dev_priv->drm;
919 	int i, ret;
920 
921 	ret = mutex_lock_interruptible(&dev->struct_mutex);
922 	if (ret)
923 		return ret;
924 
925 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
927 		struct i915_vma *vma = dev_priv->fence_regs[i].vma;
928 
929 		seq_printf(m, "Fence %d, pin count = %d, object = ",
930 			   i, dev_priv->fence_regs[i].pin_count);
931 		if (!vma)
932 			seq_puts(m, "unused");
933 		else
934 			describe_obj(m, vma->obj);
935 		seq_putc(m, '\n');
936 	}
937 
938 	mutex_unlock(&dev->struct_mutex);
939 	return 0;
940 }
941 
942 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
943 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944 			      size_t count, loff_t *pos)
945 {
946 	struct i915_gpu_state *error = file->private_data;
947 	struct drm_i915_error_state_buf str;
948 	ssize_t ret;
949 	loff_t tmp;
950 
951 	if (!error)
952 		return 0;
953 
954 	ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
955 	if (ret)
956 		return ret;
957 
958 	ret = i915_error_state_to_str(&str, error);
959 	if (ret)
960 		goto out;
961 
962 	tmp = 0;
963 	ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
964 	if (ret < 0)
965 		goto out;
966 
967 	*pos = str.start + ret;
968 out:
969 	i915_error_state_buf_release(&str);
970 	return ret;
971 }
972 
973 static int gpu_state_release(struct inode *inode, struct file *file)
974 {
975 	i915_gpu_state_put(file->private_data);
976 	return 0;
977 }
978 
979 static int i915_gpu_info_open(struct inode *inode, struct file *file)
980 {
981 	struct drm_i915_private *i915 = inode->i_private;
982 	struct i915_gpu_state *gpu;
983 
984 	intel_runtime_pm_get(i915);
985 	gpu = i915_capture_gpu_state(i915);
986 	intel_runtime_pm_put(i915);
987 	if (!gpu)
988 		return -ENOMEM;
989 
990 	file->private_data = gpu;
991 	return 0;
992 }
993 
994 static const struct file_operations i915_gpu_info_fops = {
995 	.owner = THIS_MODULE,
996 	.open = i915_gpu_info_open,
997 	.read = gpu_state_read,
998 	.llseek = default_llseek,
999 	.release = gpu_state_release,
1000 };
1001 
1002 static ssize_t
1003 i915_error_state_write(struct file *filp,
1004 		       const char __user *ubuf,
1005 		       size_t cnt,
1006 		       loff_t *ppos)
1007 {
1008 	struct i915_gpu_state *error = filp->private_data;
1009 
1010 	if (!error)
1011 		return 0;
1012 
1013 	DRM_DEBUG_DRIVER("Resetting error state\n");
1014 	i915_reset_error_state(error->i915);
1015 
1016 	return cnt;
1017 }
1018 
1019 static int i915_error_state_open(struct inode *inode, struct file *file)
1020 {
1021 	file->private_data = i915_first_error_state(inode->i_private);
1022 	return 0;
1023 }
1024 
1025 static const struct file_operations i915_error_state_fops = {
1026 	.owner = THIS_MODULE,
1027 	.open = i915_error_state_open,
1028 	.read = gpu_state_read,
1029 	.write = i915_error_state_write,
1030 	.llseek = default_llseek,
1031 	.release = gpu_state_release,
1032 };
1033 #endif
1034 
1035 static int
1036 i915_next_seqno_set(void *data, u64 val)
1037 {
1038 	struct drm_i915_private *dev_priv = data;
1039 	struct drm_device *dev = &dev_priv->drm;
1040 	int ret;
1041 
1042 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1043 	if (ret)
1044 		return ret;
1045 
1046 	intel_runtime_pm_get(dev_priv);
1047 	ret = i915_gem_set_global_seqno(dev, val);
1048 	intel_runtime_pm_put(dev_priv);
1049 
1050 	mutex_unlock(&dev->struct_mutex);
1051 
1052 	return ret;
1053 }
1054 
1055 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
1056 			NULL, i915_next_seqno_set,
1057 			"0x%llx\n");
1058 
1059 static int i915_frequency_info(struct seq_file *m, void *unused)
1060 {
1061 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1062 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1063 	int ret = 0;
1064 
1065 	intel_runtime_pm_get(dev_priv);
1066 
1067 	if (IS_GEN5(dev_priv)) {
1068 		u16 rgvswctl = I915_READ16(MEMSWCTL);
1069 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1070 
1071 		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1072 		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1073 		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1074 			   MEMSTAT_VID_SHIFT);
1075 		seq_printf(m, "Current P-state: %d\n",
1076 			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1077 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1078 		u32 rpmodectl, freq_sts;
1079 
1080 		mutex_lock(&dev_priv->pcu_lock);
1081 
1082 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1083 		seq_printf(m, "Video Turbo Mode: %s\n",
1084 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1085 		seq_printf(m, "HW control enabled: %s\n",
1086 			   yesno(rpmodectl & GEN6_RP_ENABLE));
1087 		seq_printf(m, "SW control enabled: %s\n",
1088 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1089 				  GEN6_RP_MEDIA_SW_MODE));
1090 
1091 		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1092 		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1093 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1094 
1095 		seq_printf(m, "actual GPU freq: %d MHz\n",
1096 			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1097 
1098 		seq_printf(m, "current GPU freq: %d MHz\n",
1099 			   intel_gpu_freq(dev_priv, rps->cur_freq));
1100 
1101 		seq_printf(m, "max GPU freq: %d MHz\n",
1102 			   intel_gpu_freq(dev_priv, rps->max_freq));
1103 
1104 		seq_printf(m, "min GPU freq: %d MHz\n",
1105 			   intel_gpu_freq(dev_priv, rps->min_freq));
1106 
1107 		seq_printf(m, "idle GPU freq: %d MHz\n",
1108 			   intel_gpu_freq(dev_priv, rps->idle_freq));
1109 
1110 		seq_printf(m,
1111 			   "efficient (RPe) frequency: %d MHz\n",
1112 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1113 		mutex_unlock(&dev_priv->pcu_lock);
1114 	} else if (INTEL_GEN(dev_priv) >= 6) {
1115 		u32 rp_state_limits;
1116 		u32 gt_perf_status;
1117 		u32 rp_state_cap;
1118 		u32 rpmodectl, rpinclimit, rpdeclimit;
1119 		u32 rpstat, cagf, reqf;
1120 		u32 rpupei, rpcurup, rpprevup;
1121 		u32 rpdownei, rpcurdown, rpprevdown;
1122 		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1123 		int max_freq;
1124 
1125 		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1126 		if (IS_GEN9_LP(dev_priv)) {
1127 			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1128 			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1129 		} else {
1130 			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1131 			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1132 		}
1133 
1134 		/* RPSTAT1 is in the GT power well */
1135 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1136 
1137 		reqf = I915_READ(GEN6_RPNSWREQ);
1138 		if (INTEL_GEN(dev_priv) >= 9)
1139 			reqf >>= 23;
1140 		else {
1141 			reqf &= ~GEN6_TURBO_DISABLE;
1142 			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1143 				reqf >>= 24;
1144 			else
1145 				reqf >>= 25;
1146 		}
1147 		reqf = intel_gpu_freq(dev_priv, reqf);
1148 
1149 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
1150 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1151 		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1152 
1153 		rpstat = I915_READ(GEN6_RPSTAT1);
1154 		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1155 		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1156 		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1157 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1158 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1159 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
1160 		cagf = intel_gpu_freq(dev_priv,
1161 				      intel_get_cagf(dev_priv, rpstat));
1162 
1163 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1164 
1165 		if (INTEL_GEN(dev_priv) >= 11) {
1166 			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1167 			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1168 			/*
1169 			 * The equivalent to the PM ISR & IIR cannot be read
1170 			 * without affecting the current state of the system
1171 			 */
1172 			pm_isr = 0;
1173 			pm_iir = 0;
1174 		} else if (INTEL_GEN(dev_priv) >= 8) {
1175 			pm_ier = I915_READ(GEN8_GT_IER(2));
1176 			pm_imr = I915_READ(GEN8_GT_IMR(2));
1177 			pm_isr = I915_READ(GEN8_GT_ISR(2));
1178 			pm_iir = I915_READ(GEN8_GT_IIR(2));
1179 		} else {
1180 			pm_ier = I915_READ(GEN6_PMIER);
1181 			pm_imr = I915_READ(GEN6_PMIMR);
1182 			pm_isr = I915_READ(GEN6_PMISR);
1183 			pm_iir = I915_READ(GEN6_PMIIR);
1184 		}
1185 		pm_mask = I915_READ(GEN6_PMINTRMSK);
1186 
1187 		seq_printf(m, "Video Turbo Mode: %s\n",
1188 			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1189 		seq_printf(m, "HW control enabled: %s\n",
1190 			   yesno(rpmodectl & GEN6_RP_ENABLE));
1191 		seq_printf(m, "SW control enabled: %s\n",
1192 			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1193 				  GEN6_RP_MEDIA_SW_MODE));
1194 
1195 		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1196 			   pm_ier, pm_imr, pm_mask);
1197 		if (INTEL_GEN(dev_priv) <= 10)
1198 			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1199 				   pm_isr, pm_iir);
1200 		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
1201 			   rps->pm_intrmsk_mbz);
1202 		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1203 		seq_printf(m, "Render p-state ratio: %d\n",
1204 			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
1205 		seq_printf(m, "Render p-state VID: %d\n",
1206 			   gt_perf_status & 0xff);
1207 		seq_printf(m, "Render p-state limit: %d\n",
1208 			   rp_state_limits & 0xff);
1209 		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1210 		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1211 		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1212 		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1213 		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1214 		seq_printf(m, "CAGF: %dMHz\n", cagf);
1215 		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1216 			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1217 		seq_printf(m, "RP CUR UP: %d (%dus)\n",
1218 			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1219 		seq_printf(m, "RP PREV UP: %d (%dus)\n",
1220 			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
1221 		seq_printf(m, "Up threshold: %d%%\n",
1222 			   rps->power.up_threshold);
1223 
1224 		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1225 			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1226 		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1227 			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1228 		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1229 			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
1230 		seq_printf(m, "Down threshold: %d%%\n",
1231 			   rps->power.down_threshold);
1232 
1233 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
1234 			    rp_state_cap >> 16) & 0xff;
1235 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1236 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1237 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1238 			   intel_gpu_freq(dev_priv, max_freq));
1239 
1240 		max_freq = (rp_state_cap & 0xff00) >> 8;
1241 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1242 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1243 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1244 			   intel_gpu_freq(dev_priv, max_freq));
1245 
1246 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
1247 			    rp_state_cap >> 0) & 0xff;
1248 		max_freq *= (IS_GEN9_BC(dev_priv) ||
1249 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1250 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1251 			   intel_gpu_freq(dev_priv, max_freq));
1252 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
1253 			   intel_gpu_freq(dev_priv, rps->max_freq));
1254 
1255 		seq_printf(m, "Current freq: %d MHz\n",
1256 			   intel_gpu_freq(dev_priv, rps->cur_freq));
1257 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
1258 		seq_printf(m, "Idle freq: %d MHz\n",
1259 			   intel_gpu_freq(dev_priv, rps->idle_freq));
1260 		seq_printf(m, "Min freq: %d MHz\n",
1261 			   intel_gpu_freq(dev_priv, rps->min_freq));
1262 		seq_printf(m, "Boost freq: %d MHz\n",
1263 			   intel_gpu_freq(dev_priv, rps->boost_freq));
1264 		seq_printf(m, "Max freq: %d MHz\n",
1265 			   intel_gpu_freq(dev_priv, rps->max_freq));
1266 		seq_printf(m,
1267 			   "efficient (RPe) frequency: %d MHz\n",
1268 			   intel_gpu_freq(dev_priv, rps->efficient_freq));
1269 	} else {
1270 		seq_puts(m, "no P-state info available\n");
1271 	}
1272 
1273 	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1274 	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1275 	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1276 
1277 	intel_runtime_pm_put(dev_priv);
1278 	return ret;
1279 }
1280 
1281 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1282 			       struct seq_file *m,
1283 			       struct intel_instdone *instdone)
1284 {
1285 	int slice;
1286 	int subslice;
1287 
1288 	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1289 		   instdone->instdone);
1290 
1291 	if (INTEL_GEN(dev_priv) <= 3)
1292 		return;
1293 
1294 	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1295 		   instdone->slice_common);
1296 
1297 	if (INTEL_GEN(dev_priv) <= 6)
1298 		return;
1299 
1300 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1301 		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1302 			   slice, subslice, instdone->sampler[slice][subslice]);
1303 
1304 	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1305 		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1306 			   slice, subslice, instdone->row[slice][subslice]);
1307 }
1308 
1309 static int i915_hangcheck_info(struct seq_file *m, void *unused)
1310 {
1311 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1312 	struct intel_engine_cs *engine;
1313 	u64 acthd[I915_NUM_ENGINES];
1314 	u32 seqno[I915_NUM_ENGINES];
1315 	struct intel_instdone instdone;
1316 	enum intel_engine_id id;
1317 
1318 	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
1319 		seq_puts(m, "Wedged\n");
1320 	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
1321 		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
1322 	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
1323 		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
1324 	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
1325 		seq_puts(m, "Waiter holding struct mutex\n");
1326 	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
1327 		seq_puts(m, "struct_mutex blocked for reset\n");
1328 
1329 	if (!i915_modparams.enable_hangcheck) {
1330 		seq_puts(m, "Hangcheck disabled\n");
1331 		return 0;
1332 	}
1333 
1334 	intel_runtime_pm_get(dev_priv);
1335 
1336 	for_each_engine(engine, dev_priv, id) {
1337 		acthd[id] = intel_engine_get_active_head(engine);
1338 		seqno[id] = intel_engine_get_seqno(engine);
1339 	}
1340 
1341 	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
1342 
1343 	intel_runtime_pm_put(dev_priv);
1344 
1345 	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
1346 		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
1347 			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
1348 					    jiffies));
1349 	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
1350 		seq_puts(m, "Hangcheck active, work pending\n");
1351 	else
1352 		seq_puts(m, "Hangcheck inactive\n");
1353 
1354 	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
1355 
1356 	for_each_engine(engine, dev_priv, id) {
1357 		struct intel_breadcrumbs *b = &engine->breadcrumbs;
1358 		struct rb_node *rb;
1359 
1360 		seq_printf(m, "%s:\n", engine->name);
1361 		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
1362 			   engine->hangcheck.seqno, seqno[id],
1363 			   intel_engine_last_submit(engine));
1364 		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
1365 			   yesno(intel_engine_has_waiter(engine)),
1366 			   yesno(test_bit(engine->id,
1367 					  &dev_priv->gpu_error.missed_irq_rings)),
1368 			   yesno(engine->hangcheck.stalled),
1369 			   yesno(engine->hangcheck.wedged));
1370 
1371 		spin_lock_irq(&b->rb_lock);
1372 		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
1373 			struct intel_wait *w = rb_entry(rb, typeof(*w), node);
1374 
1375 			seq_printf(m, "\t%s [%d] waiting for %x\n",
1376 				   w->tsk->comm, w->tsk->pid, w->seqno);
1377 		}
1378 		spin_unlock_irq(&b->rb_lock);
1379 
1380 		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1381 			   (long long)engine->hangcheck.acthd,
1382 			   (long long)acthd[id]);
1383 		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
1384 			   hangcheck_action_to_str(engine->hangcheck.action),
1385 			   engine->hangcheck.action,
1386 			   jiffies_to_msecs(jiffies -
1387 					    engine->hangcheck.action_timestamp));
1388 
1389 		if (engine->id == RCS) {
1390 			seq_puts(m, "\tinstdone read =\n");
1391 
1392 			i915_instdone_info(dev_priv, m, &instdone);
1393 
1394 			seq_puts(m, "\tinstdone accu =\n");
1395 
1396 			i915_instdone_info(dev_priv, m,
1397 					   &engine->hangcheck.instdone);
1398 		}
1399 	}
1400 
1401 	return 0;
1402 }
1403 
1404 static int i915_reset_info(struct seq_file *m, void *unused)
1405 {
1406 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1407 	struct i915_gpu_error *error = &dev_priv->gpu_error;
1408 	struct intel_engine_cs *engine;
1409 	enum intel_engine_id id;
1410 
1411 	seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1412 
1413 	for_each_engine(engine, dev_priv, id) {
1414 		seq_printf(m, "%s = %u\n", engine->name,
1415 			   i915_reset_engine_count(error, engine));
1416 	}
1417 
1418 	return 0;
1419 }
1420 
1421 static int ironlake_drpc_info(struct seq_file *m)
1422 {
1423 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1424 	u32 rgvmodectl, rstdbyctl;
1425 	u16 crstandvid;
1426 
1427 	rgvmodectl = I915_READ(MEMMODECTL);
1428 	rstdbyctl = I915_READ(RSTDBYCTL);
1429 	crstandvid = I915_READ16(CRSTANDVID);
1430 
1431 	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1432 	seq_printf(m, "Boost freq: %d\n",
1433 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1434 		   MEMMODE_BOOST_FREQ_SHIFT);
1435 	seq_printf(m, "HW control enabled: %s\n",
1436 		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1437 	seq_printf(m, "SW control enabled: %s\n",
1438 		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1439 	seq_printf(m, "Gated voltage change: %s\n",
1440 		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1441 	seq_printf(m, "Starting frequency: P%d\n",
1442 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1443 	seq_printf(m, "Max P-state: P%d\n",
1444 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1445 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1446 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1447 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1448 	seq_printf(m, "Render standby enabled: %s\n",
1449 		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
1450 	seq_puts(m, "Current RS state: ");
1451 	switch (rstdbyctl & RSX_STATUS_MASK) {
1452 	case RSX_STATUS_ON:
1453 		seq_puts(m, "on\n");
1454 		break;
1455 	case RSX_STATUS_RC1:
1456 		seq_puts(m, "RC1\n");
1457 		break;
1458 	case RSX_STATUS_RC1E:
1459 		seq_puts(m, "RC1E\n");
1460 		break;
1461 	case RSX_STATUS_RS1:
1462 		seq_puts(m, "RS1\n");
1463 		break;
1464 	case RSX_STATUS_RS2:
1465 		seq_puts(m, "RS2 (RC6)\n");
1466 		break;
1467 	case RSX_STATUS_RS3:
1468 		seq_puts(m, "RC3 (RC6+)\n");
1469 		break;
1470 	default:
1471 		seq_puts(m, "unknown\n");
1472 		break;
1473 	}
1474 
1475 	return 0;
1476 }
1477 
1478 static int i915_forcewake_domains(struct seq_file *m, void *data)
1479 {
1480 	struct drm_i915_private *i915 = node_to_i915(m->private);
1481 	struct intel_uncore_forcewake_domain *fw_domain;
1482 	unsigned int tmp;
1483 
1484 	seq_printf(m, "user.bypass_count = %u\n",
1485 		   i915->uncore.user_forcewake.count);
1486 
1487 	for_each_fw_domain(fw_domain, i915, tmp)
1488 		seq_printf(m, "%s.wake_count = %u\n",
1489 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
1490 			   READ_ONCE(fw_domain->wake_count));
1491 
1492 	return 0;
1493 }
1494 
1495 static void print_rc6_res(struct seq_file *m,
1496 			  const char *title,
1497 			  const i915_reg_t reg)
1498 {
1499 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1500 
1501 	seq_printf(m, "%s %u (%llu us)\n",
1502 		   title, I915_READ(reg),
1503 		   intel_rc6_residency_us(dev_priv, reg));
1504 }
1505 
1506 static int vlv_drpc_info(struct seq_file *m)
1507 {
1508 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1509 	u32 rcctl1, pw_status;
1510 
1511 	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1512 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1513 
1514 	seq_printf(m, "RC6 Enabled: %s\n",
1515 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1516 					GEN6_RC_CTL_EI_MODE(1))));
1517 	seq_printf(m, "Render Power Well: %s\n",
1518 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1519 	seq_printf(m, "Media Power Well: %s\n",
1520 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1521 
1522 	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
1523 	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
1524 
1525 	return i915_forcewake_domains(m, NULL);
1526 }
1527 
1528 static int gen6_drpc_info(struct seq_file *m)
1529 {
1530 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1531 	u32 gt_core_status, rcctl1, rc6vids = 0;
1532 	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1533 
1534 	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1535 	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1536 
1537 	rcctl1 = I915_READ(GEN6_RC_CONTROL);
1538 	if (INTEL_GEN(dev_priv) >= 9) {
1539 		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1540 		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1541 	}
1542 
1543 	if (INTEL_GEN(dev_priv) <= 7) {
1544 		mutex_lock(&dev_priv->pcu_lock);
1545 		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1546 				       &rc6vids);
1547 		mutex_unlock(&dev_priv->pcu_lock);
1548 	}
1549 
1550 	seq_printf(m, "RC1e Enabled: %s\n",
1551 		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1552 	seq_printf(m, "RC6 Enabled: %s\n",
1553 		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1554 	if (INTEL_GEN(dev_priv) >= 9) {
1555 		seq_printf(m, "Render Well Gating Enabled: %s\n",
1556 			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1557 		seq_printf(m, "Media Well Gating Enabled: %s\n",
1558 			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1559 	}
1560 	seq_printf(m, "Deep RC6 Enabled: %s\n",
1561 		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1562 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
1563 		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1564 	seq_puts(m, "Current RC state: ");
1565 	switch (gt_core_status & GEN6_RCn_MASK) {
1566 	case GEN6_RC0:
1567 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1568 			seq_puts(m, "Core Power Down\n");
1569 		else
1570 			seq_puts(m, "on\n");
1571 		break;
1572 	case GEN6_RC3:
1573 		seq_puts(m, "RC3\n");
1574 		break;
1575 	case GEN6_RC6:
1576 		seq_puts(m, "RC6\n");
1577 		break;
1578 	case GEN6_RC7:
1579 		seq_puts(m, "RC7\n");
1580 		break;
1581 	default:
1582 		seq_puts(m, "Unknown\n");
1583 		break;
1584 	}
1585 
1586 	seq_printf(m, "Core Power Down: %s\n",
1587 		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1588 	if (INTEL_GEN(dev_priv) >= 9) {
1589 		seq_printf(m, "Render Power Well: %s\n",
1590 			(gen9_powergate_status &
1591 			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1592 		seq_printf(m, "Media Power Well: %s\n",
1593 			(gen9_powergate_status &
1594 			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1595 	}
1596 
1597 	/* Not exactly sure what this is */
1598 	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1599 		      GEN6_GT_GFX_RC6_LOCKED);
1600 	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1601 	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1602 	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1603 
1604 	if (INTEL_GEN(dev_priv) <= 7) {
1605 		seq_printf(m, "RC6   voltage: %dmV\n",
1606 			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1607 		seq_printf(m, "RC6+  voltage: %dmV\n",
1608 			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1609 		seq_printf(m, "RC6++ voltage: %dmV\n",
1610 			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1611 	}
1612 
1613 	return i915_forcewake_domains(m, NULL);
1614 }
1615 
1616 static int i915_drpc_info(struct seq_file *m, void *unused)
1617 {
1618 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1619 	int err;
1620 
1621 	intel_runtime_pm_get(dev_priv);
1622 
1623 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1624 		err = vlv_drpc_info(m);
1625 	else if (INTEL_GEN(dev_priv) >= 6)
1626 		err = gen6_drpc_info(m);
1627 	else
1628 		err = ironlake_drpc_info(m);
1629 
1630 	intel_runtime_pm_put(dev_priv);
1631 
1632 	return err;
1633 }
1634 
1635 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1636 {
1637 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1638 
1639 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640 		   dev_priv->fb_tracking.busy_bits);
1641 
1642 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643 		   dev_priv->fb_tracking.flip_bits);
1644 
1645 	return 0;
1646 }
1647 
1648 static int i915_fbc_status(struct seq_file *m, void *unused)
1649 {
1650 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1651 	struct intel_fbc *fbc = &dev_priv->fbc;
1652 
1653 	if (!HAS_FBC(dev_priv))
1654 		return -ENODEV;
1655 
1656 	intel_runtime_pm_get(dev_priv);
1657 	mutex_lock(&fbc->lock);
1658 
1659 	if (intel_fbc_is_active(dev_priv))
1660 		seq_puts(m, "FBC enabled\n");
1661 	else
1662 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1663 
1664 	if (intel_fbc_is_active(dev_priv)) {
1665 		u32 mask;
1666 
1667 		if (INTEL_GEN(dev_priv) >= 8)
1668 			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1669 		else if (INTEL_GEN(dev_priv) >= 7)
1670 			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1671 		else if (INTEL_GEN(dev_priv) >= 5)
1672 			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1673 		else if (IS_G4X(dev_priv))
1674 			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1675 		else
1676 			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1677 							FBC_STAT_COMPRESSED);
1678 
1679 		seq_printf(m, "Compressing: %s\n", yesno(mask));
1680 	}
1681 
1682 	mutex_unlock(&fbc->lock);
1683 	intel_runtime_pm_put(dev_priv);
1684 
1685 	return 0;
1686 }
1687 
1688 static int i915_fbc_false_color_get(void *data, u64 *val)
1689 {
1690 	struct drm_i915_private *dev_priv = data;
1691 
1692 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1693 		return -ENODEV;
1694 
1695 	*val = dev_priv->fbc.false_color;
1696 
1697 	return 0;
1698 }
1699 
1700 static int i915_fbc_false_color_set(void *data, u64 val)
1701 {
1702 	struct drm_i915_private *dev_priv = data;
1703 	u32 reg;
1704 
1705 	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1706 		return -ENODEV;
1707 
1708 	mutex_lock(&dev_priv->fbc.lock);
1709 
1710 	reg = I915_READ(ILK_DPFC_CONTROL);
1711 	dev_priv->fbc.false_color = val;
1712 
1713 	I915_WRITE(ILK_DPFC_CONTROL, val ?
1714 		   (reg | FBC_CTL_FALSE_COLOR) :
1715 		   (reg & ~FBC_CTL_FALSE_COLOR));
1716 
1717 	mutex_unlock(&dev_priv->fbc.lock);
1718 	return 0;
1719 }
1720 
1721 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1722 			i915_fbc_false_color_get, i915_fbc_false_color_set,
1723 			"%llu\n");
1724 
1725 static int i915_ips_status(struct seq_file *m, void *unused)
1726 {
1727 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1728 
1729 	if (!HAS_IPS(dev_priv))
1730 		return -ENODEV;
1731 
1732 	intel_runtime_pm_get(dev_priv);
1733 
1734 	seq_printf(m, "Enabled by kernel parameter: %s\n",
1735 		   yesno(i915_modparams.enable_ips));
1736 
1737 	if (INTEL_GEN(dev_priv) >= 8) {
1738 		seq_puts(m, "Currently: unknown\n");
1739 	} else {
1740 		if (I915_READ(IPS_CTL) & IPS_ENABLE)
1741 			seq_puts(m, "Currently: enabled\n");
1742 		else
1743 			seq_puts(m, "Currently: disabled\n");
1744 	}
1745 
1746 	intel_runtime_pm_put(dev_priv);
1747 
1748 	return 0;
1749 }
1750 
1751 static int i915_sr_status(struct seq_file *m, void *unused)
1752 {
1753 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1754 	bool sr_enabled = false;
1755 
1756 	intel_runtime_pm_get(dev_priv);
1757 	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1758 
1759 	if (INTEL_GEN(dev_priv) >= 9)
1760 		/* no global SR status; inspect per-plane WM */;
1761 	else if (HAS_PCH_SPLIT(dev_priv))
1762 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
1763 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
1764 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
1765 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
1766 	else if (IS_I915GM(dev_priv))
1767 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
1768 	else if (IS_PINEVIEW(dev_priv))
1769 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
1770 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1771 		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
1772 
1773 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1774 	intel_runtime_pm_put(dev_priv);
1775 
1776 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
1777 
1778 	return 0;
1779 }
1780 
1781 static int i915_emon_status(struct seq_file *m, void *unused)
1782 {
1783 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1784 	struct drm_device *dev = &dev_priv->drm;
1785 	unsigned long temp, chipset, gfx;
1786 	int ret;
1787 
1788 	if (!IS_GEN5(dev_priv))
1789 		return -ENODEV;
1790 
1791 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1792 	if (ret)
1793 		return ret;
1794 
1795 	temp = i915_mch_val(dev_priv);
1796 	chipset = i915_chipset_val(dev_priv);
1797 	gfx = i915_gfx_val(dev_priv);
1798 	mutex_unlock(&dev->struct_mutex);
1799 
1800 	seq_printf(m, "GMCH temp: %ld\n", temp);
1801 	seq_printf(m, "Chipset power: %ld\n", chipset);
1802 	seq_printf(m, "GFX power: %ld\n", gfx);
1803 	seq_printf(m, "Total power: %ld\n", chipset + gfx);
1804 
1805 	return 0;
1806 }
1807 
1808 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1809 {
1810 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1811 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
1812 	unsigned int max_gpu_freq, min_gpu_freq;
1813 	int gpu_freq, ia_freq;
1814 	int ret;
1815 
1816 	if (!HAS_LLC(dev_priv))
1817 		return -ENODEV;
1818 
1819 	intel_runtime_pm_get(dev_priv);
1820 
1821 	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
1822 	if (ret)
1823 		goto out;
1824 
1825 	min_gpu_freq = rps->min_freq;
1826 	max_gpu_freq = rps->max_freq;
1827 	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1828 		/* Convert GT frequency to 50 HZ units */
1829 		min_gpu_freq /= GEN9_FREQ_SCALER;
1830 		max_gpu_freq /= GEN9_FREQ_SCALER;
1831 	}
1832 
1833 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1834 
1835 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1836 		ia_freq = gpu_freq;
1837 		sandybridge_pcode_read(dev_priv,
1838 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
1839 				       &ia_freq);
1840 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1841 			   intel_gpu_freq(dev_priv, (gpu_freq *
1842 						     (IS_GEN9_BC(dev_priv) ||
1843 						      INTEL_GEN(dev_priv) >= 10 ?
1844 						      GEN9_FREQ_SCALER : 1))),
1845 			   ((ia_freq >> 0) & 0xff) * 100,
1846 			   ((ia_freq >> 8) & 0xff) * 100);
1847 	}
1848 
1849 	mutex_unlock(&dev_priv->pcu_lock);
1850 
1851 out:
1852 	intel_runtime_pm_put(dev_priv);
1853 	return ret;
1854 }
1855 
1856 static int i915_opregion(struct seq_file *m, void *unused)
1857 {
1858 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1859 	struct drm_device *dev = &dev_priv->drm;
1860 	struct intel_opregion *opregion = &dev_priv->opregion;
1861 	int ret;
1862 
1863 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1864 	if (ret)
1865 		goto out;
1866 
1867 	if (opregion->header)
1868 		seq_write(m, opregion->header, OPREGION_SIZE);
1869 
1870 	mutex_unlock(&dev->struct_mutex);
1871 
1872 out:
1873 	return 0;
1874 }
1875 
1876 static int i915_vbt(struct seq_file *m, void *unused)
1877 {
1878 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1879 
1880 	if (opregion->vbt)
1881 		seq_write(m, opregion->vbt, opregion->vbt_size);
1882 
1883 	return 0;
1884 }
1885 
1886 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1887 {
1888 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1889 	struct drm_device *dev = &dev_priv->drm;
1890 	struct intel_framebuffer *fbdev_fb = NULL;
1891 	struct drm_framebuffer *drm_fb;
1892 	int ret;
1893 
1894 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1895 	if (ret)
1896 		return ret;
1897 
1898 #ifdef CONFIG_DRM_FBDEV_EMULATION
1899 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1900 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1901 
1902 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1903 			   fbdev_fb->base.width,
1904 			   fbdev_fb->base.height,
1905 			   fbdev_fb->base.format->depth,
1906 			   fbdev_fb->base.format->cpp[0] * 8,
1907 			   fbdev_fb->base.modifier,
1908 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
1909 		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1910 		seq_putc(m, '\n');
1911 	}
1912 #endif
1913 
1914 	mutex_lock(&dev->mode_config.fb_lock);
1915 	drm_for_each_fb(drm_fb, dev) {
1916 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1917 		if (fb == fbdev_fb)
1918 			continue;
1919 
1920 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1921 			   fb->base.width,
1922 			   fb->base.height,
1923 			   fb->base.format->depth,
1924 			   fb->base.format->cpp[0] * 8,
1925 			   fb->base.modifier,
1926 			   drm_framebuffer_read_refcount(&fb->base));
1927 		describe_obj(m, intel_fb_obj(&fb->base));
1928 		seq_putc(m, '\n');
1929 	}
1930 	mutex_unlock(&dev->mode_config.fb_lock);
1931 	mutex_unlock(&dev->struct_mutex);
1932 
1933 	return 0;
1934 }
1935 
1936 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1937 {
1938 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1939 		   ring->space, ring->head, ring->tail, ring->emit);
1940 }
1941 
1942 static int i915_context_status(struct seq_file *m, void *unused)
1943 {
1944 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1945 	struct drm_device *dev = &dev_priv->drm;
1946 	struct intel_engine_cs *engine;
1947 	struct i915_gem_context *ctx;
1948 	enum intel_engine_id id;
1949 	int ret;
1950 
1951 	ret = mutex_lock_interruptible(&dev->struct_mutex);
1952 	if (ret)
1953 		return ret;
1954 
1955 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1956 		seq_puts(m, "HW context ");
1957 		if (!list_empty(&ctx->hw_id_link))
1958 			seq_printf(m, "%x [pin %u]", ctx->hw_id,
1959 				   atomic_read(&ctx->hw_id_pin_count));
1960 		if (ctx->pid) {
1961 			struct task_struct *task;
1962 
1963 			task = get_pid_task(ctx->pid, PIDTYPE_PID);
1964 			if (task) {
1965 				seq_printf(m, "(%s [%d]) ",
1966 					   task->comm, task->pid);
1967 				put_task_struct(task);
1968 			}
1969 		} else if (IS_ERR(ctx->file_priv)) {
1970 			seq_puts(m, "(deleted) ");
1971 		} else {
1972 			seq_puts(m, "(kernel) ");
1973 		}
1974 
1975 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1976 		seq_putc(m, '\n');
1977 
1978 		for_each_engine(engine, dev_priv, id) {
1979 			struct intel_context *ce =
1980 				to_intel_context(ctx, engine);
1981 
1982 			seq_printf(m, "%s: ", engine->name);
1983 			if (ce->state)
1984 				describe_obj(m, ce->state->obj);
1985 			if (ce->ring)
1986 				describe_ctx_ring(m, ce->ring);
1987 			seq_putc(m, '\n');
1988 		}
1989 
1990 		seq_putc(m, '\n');
1991 	}
1992 
1993 	mutex_unlock(&dev->struct_mutex);
1994 
1995 	return 0;
1996 }
1997 
1998 static const char *swizzle_string(unsigned swizzle)
1999 {
2000 	switch (swizzle) {
2001 	case I915_BIT_6_SWIZZLE_NONE:
2002 		return "none";
2003 	case I915_BIT_6_SWIZZLE_9:
2004 		return "bit9";
2005 	case I915_BIT_6_SWIZZLE_9_10:
2006 		return "bit9/bit10";
2007 	case I915_BIT_6_SWIZZLE_9_11:
2008 		return "bit9/bit11";
2009 	case I915_BIT_6_SWIZZLE_9_10_11:
2010 		return "bit9/bit10/bit11";
2011 	case I915_BIT_6_SWIZZLE_9_17:
2012 		return "bit9/bit17";
2013 	case I915_BIT_6_SWIZZLE_9_10_17:
2014 		return "bit9/bit10/bit17";
2015 	case I915_BIT_6_SWIZZLE_UNKNOWN:
2016 		return "unknown";
2017 	}
2018 
2019 	return "bug";
2020 }
2021 
2022 static int i915_swizzle_info(struct seq_file *m, void *data)
2023 {
2024 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2025 
2026 	intel_runtime_pm_get(dev_priv);
2027 
2028 	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
2029 		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
2030 	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
2031 		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));
2032 
2033 	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
2034 		seq_printf(m, "DDC = 0x%08x\n",
2035 			   I915_READ(DCC));
2036 		seq_printf(m, "DDC2 = 0x%08x\n",
2037 			   I915_READ(DCC2));
2038 		seq_printf(m, "C0DRB3 = 0x%04x\n",
2039 			   I915_READ16(C0DRB3));
2040 		seq_printf(m, "C1DRB3 = 0x%04x\n",
2041 			   I915_READ16(C1DRB3));
2042 	} else if (INTEL_GEN(dev_priv) >= 6) {
2043 		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
2044 			   I915_READ(MAD_DIMM_C0));
2045 		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
2046 			   I915_READ(MAD_DIMM_C1));
2047 		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
2048 			   I915_READ(MAD_DIMM_C2));
2049 		seq_printf(m, "TILECTL = 0x%08x\n",
2050 			   I915_READ(TILECTL));
2051 		if (INTEL_GEN(dev_priv) >= 8)
2052 			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
2053 				   I915_READ(GAMTARBMODE));
2054 		else
2055 			seq_printf(m, "ARB_MODE = 0x%08x\n",
2056 				   I915_READ(ARB_MODE));
2057 		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
2058 			   I915_READ(DISP_ARB_CTL));
2059 	}
2060 
2061 	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2062 		seq_puts(m, "L-shaped memory detected\n");
2063 
2064 	intel_runtime_pm_put(dev_priv);
2065 
2066 	return 0;
2067 }
2068 
2069 static int per_file_ctx(int id, void *ptr, void *data)
2070 {
2071 	struct i915_gem_context *ctx = ptr;
2072 	struct seq_file *m = data;
2073 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2074 
2075 	if (!ppgtt) {
2076 		seq_printf(m, "  no ppgtt for context %d\n",
2077 			   ctx->user_handle);
2078 		return 0;
2079 	}
2080 
2081 	if (i915_gem_context_is_default(ctx))
2082 		seq_puts(m, "  default context:\n");
2083 	else
2084 		seq_printf(m, "  context %d:\n", ctx->user_handle);
2085 	ppgtt->debug_dump(ppgtt, m);
2086 
2087 	return 0;
2088 }
2089 
2090 static void gen8_ppgtt_info(struct seq_file *m,
2091 			    struct drm_i915_private *dev_priv)
2092 {
2093 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2094 	struct intel_engine_cs *engine;
2095 	enum intel_engine_id id;
2096 	int i;
2097 
2098 	if (!ppgtt)
2099 		return;
2100 
2101 	for_each_engine(engine, dev_priv, id) {
2102 		seq_printf(m, "%s\n", engine->name);
2103 		for (i = 0; i < 4; i++) {
2104 			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2105 			pdp <<= 32;
2106 			pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2107 			seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2108 		}
2109 	}
2110 }
2111 
2112 static void gen6_ppgtt_info(struct seq_file *m,
2113 			    struct drm_i915_private *dev_priv)
2114 {
2115 	struct intel_engine_cs *engine;
2116 	enum intel_engine_id id;
2117 
2118 	if (IS_GEN6(dev_priv))
2119 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2120 
2121 	for_each_engine(engine, dev_priv, id) {
2122 		seq_printf(m, "%s\n", engine->name);
2123 		if (IS_GEN7(dev_priv))
2124 			seq_printf(m, "GFX_MODE: 0x%08x\n",
2125 				   I915_READ(RING_MODE_GEN7(engine)));
2126 		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
2127 			   I915_READ(RING_PP_DIR_BASE(engine)));
2128 		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
2129 			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
2130 		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
2131 			   I915_READ(RING_PP_DIR_DCLV(engine)));
2132 	}
2133 	if (dev_priv->mm.aliasing_ppgtt) {
2134 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2135 
2136 		seq_puts(m, "aliasing PPGTT:\n");
2137 		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
2138 
2139 		ppgtt->debug_dump(ppgtt, m);
2140 	}
2141 
2142 	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
2143 }
2144 
2145 static int i915_ppgtt_info(struct seq_file *m, void *data)
2146 {
2147 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2148 	struct drm_device *dev = &dev_priv->drm;
2149 	struct drm_file *file;
2150 	int ret;
2151 
2152 	mutex_lock(&dev->filelist_mutex);
2153 	ret = mutex_lock_interruptible(&dev->struct_mutex);
2154 	if (ret)
2155 		goto out_unlock;
2156 
2157 	intel_runtime_pm_get(dev_priv);
2158 
2159 	if (INTEL_GEN(dev_priv) >= 8)
2160 		gen8_ppgtt_info(m, dev_priv);
2161 	else if (INTEL_GEN(dev_priv) >= 6)
2162 		gen6_ppgtt_info(m, dev_priv);
2163 
2164 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2165 		struct drm_i915_file_private *file_priv = file->driver_priv;
2166 		struct task_struct *task;
2167 
2168 		task = get_pid_task(file->pid, PIDTYPE_PID);
2169 		if (!task) {
2170 			ret = -ESRCH;
2171 			goto out_rpm;
2172 		}
2173 		seq_printf(m, "\nproc: %s\n", task->comm);
2174 		put_task_struct(task);
2175 		idr_for_each(&file_priv->context_idr, per_file_ctx,
2176 			     (void *)(unsigned long)m);
2177 	}
2178 
2179 out_rpm:
2180 	intel_runtime_pm_put(dev_priv);
2181 	mutex_unlock(&dev->struct_mutex);
2182 out_unlock:
2183 	mutex_unlock(&dev->filelist_mutex);
2184 	return ret;
2185 }
2186 
2187 static int count_irq_waiters(struct drm_i915_private *i915)
2188 {
2189 	struct intel_engine_cs *engine;
2190 	enum intel_engine_id id;
2191 	int count = 0;
2192 
2193 	for_each_engine(engine, i915, id)
2194 		count += intel_engine_has_waiter(engine);
2195 
2196 	return count;
2197 }
2198 
2199 static const char *rps_power_to_str(unsigned int power)
2200 {
2201 	static const char * const strings[] = {
2202 		[LOW_POWER] = "low power",
2203 		[BETWEEN] = "mixed",
2204 		[HIGH_POWER] = "high power",
2205 	};
2206 
2207 	if (power >= ARRAY_SIZE(strings) || !strings[power])
2208 		return "unknown";
2209 
2210 	return strings[power];
2211 }
2212 
2213 static int i915_rps_boost_info(struct seq_file *m, void *data)
2214 {
2215 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2216 	struct drm_device *dev = &dev_priv->drm;
2217 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
2218 	struct drm_file *file;
2219 
2220 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
2221 	seq_printf(m, "GPU busy? %s [%d requests]\n",
2222 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
2223 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2224 	seq_printf(m, "Boosts outstanding? %d\n",
2225 		   atomic_read(&rps->num_waiters));
2226 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
2227 	seq_printf(m, "Frequency requested %d\n",
2228 		   intel_gpu_freq(dev_priv, rps->cur_freq));
2229 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2230 		   intel_gpu_freq(dev_priv, rps->min_freq),
2231 		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
2232 		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
2233 		   intel_gpu_freq(dev_priv, rps->max_freq));
2234 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
2235 		   intel_gpu_freq(dev_priv, rps->idle_freq),
2236 		   intel_gpu_freq(dev_priv, rps->efficient_freq),
2237 		   intel_gpu_freq(dev_priv, rps->boost_freq));
2238 
2239 	mutex_lock(&dev->filelist_mutex);
2240 	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
2241 		struct drm_i915_file_private *file_priv = file->driver_priv;
2242 		struct task_struct *task;
2243 
2244 		rcu_read_lock();
2245 		task = pid_task(file->pid, PIDTYPE_PID);
2246 		seq_printf(m, "%s [%d]: %d boosts\n",
2247 			   task ? task->comm : "<unknown>",
2248 			   task ? task->pid : -1,
2249 			   atomic_read(&file_priv->rps_client.boosts));
2250 		rcu_read_unlock();
2251 	}
2252 	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
2253 		   atomic_read(&rps->boosts));
2254 	mutex_unlock(&dev->filelist_mutex);
2255 
2256 	if (INTEL_GEN(dev_priv) >= 6 &&
2257 	    rps->enabled &&
2258 	    dev_priv->gt.active_requests) {
2259 		u32 rpup, rpupei;
2260 		u32 rpdown, rpdownei;
2261 
2262 		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2263 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
2264 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
2265 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
2266 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
2267 		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2268 
2269 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
2270 			   rps_power_to_str(rps->power.mode));
2271 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
2272 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
2273 			   rps->power.up_threshold);
2274 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
2275 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
2276 			   rps->power.down_threshold);
2277 	} else {
2278 		seq_puts(m, "\nRPS Autotuning inactive\n");
2279 	}
2280 
2281 	return 0;
2282 }
2283 
2284 static int i915_llc(struct seq_file *m, void *data)
2285 {
2286 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2287 	const bool edram = INTEL_GEN(dev_priv) > 8;
2288 
2289 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2290 	seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2291 		   intel_uncore_edram_size(dev_priv)/1024/1024);
2292 
2293 	return 0;
2294 }
2295 
2296 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2297 {
2298 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2299 	struct drm_printer p;
2300 
2301 	if (!HAS_HUC(dev_priv))
2302 		return -ENODEV;
2303 
2304 	p = drm_seq_file_printer(m);
2305 	intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2306 
2307 	intel_runtime_pm_get(dev_priv);
2308 	seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2309 	intel_runtime_pm_put(dev_priv);
2310 
2311 	return 0;
2312 }
2313 
2314 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2315 {
2316 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2317 	struct drm_printer p;
2318 	u32 tmp, i;
2319 
2320 	if (!HAS_GUC(dev_priv))
2321 		return -ENODEV;
2322 
2323 	p = drm_seq_file_printer(m);
2324 	intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2325 
2326 	intel_runtime_pm_get(dev_priv);
2327 
2328 	tmp = I915_READ(GUC_STATUS);
2329 
2330 	seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2331 	seq_printf(m, "\tBootrom status = 0x%x\n",
2332 		(tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2333 	seq_printf(m, "\tuKernel status = 0x%x\n",
2334 		(tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2335 	seq_printf(m, "\tMIA Core status = 0x%x\n",
2336 		(tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2337 	seq_puts(m, "\nScratch registers:\n");
2338 	for (i = 0; i < 16; i++)
2339 		seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2340 
2341 	intel_runtime_pm_put(dev_priv);
2342 
2343 	return 0;
2344 }
2345 
2346 static const char *
2347 stringify_guc_log_type(enum guc_log_buffer_type type)
2348 {
2349 	switch (type) {
2350 	case GUC_ISR_LOG_BUFFER:
2351 		return "ISR";
2352 	case GUC_DPC_LOG_BUFFER:
2353 		return "DPC";
2354 	case GUC_CRASH_DUMP_LOG_BUFFER:
2355 		return "CRASH";
2356 	default:
2357 		MISSING_CASE(type);
2358 	}
2359 
2360 	return "";
2361 }
2362 
2363 static void i915_guc_log_info(struct seq_file *m,
2364 			      struct drm_i915_private *dev_priv)
2365 {
2366 	struct intel_guc_log *log = &dev_priv->guc.log;
2367 	enum guc_log_buffer_type type;
2368 
2369 	if (!intel_guc_log_relay_enabled(log)) {
2370 		seq_puts(m, "GuC log relay disabled\n");
2371 		return;
2372 	}
2373 
2374 	seq_puts(m, "GuC logging stats:\n");
2375 
2376 	seq_printf(m, "\tRelay full count: %u\n",
2377 		   log->relay.full_count);
2378 
2379 	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2380 		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2381 			   stringify_guc_log_type(type),
2382 			   log->stats[type].flush,
2383 			   log->stats[type].sampled_overflow);
2384 	}
2385 }
2386 
2387 static void i915_guc_client_info(struct seq_file *m,
2388 				 struct drm_i915_private *dev_priv,
2389 				 struct intel_guc_client *client)
2390 {
2391 	struct intel_engine_cs *engine;
2392 	enum intel_engine_id id;
2393 	uint64_t tot = 0;
2394 
2395 	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2396 		client->priority, client->stage_id, client->proc_desc_offset);
2397 	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2398 		client->doorbell_id, client->doorbell_offset);
2399 
2400 	for_each_engine(engine, dev_priv, id) {
2401 		u64 submissions = client->submissions[id];
2402 		tot += submissions;
2403 		seq_printf(m, "\tSubmissions: %llu %s\n",
2404 				submissions, engine->name);
2405 	}
2406 	seq_printf(m, "\tTotal: %llu\n", tot);
2407 }
2408 
2409 static int i915_guc_info(struct seq_file *m, void *data)
2410 {
2411 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2412 	const struct intel_guc *guc = &dev_priv->guc;
2413 
2414 	if (!USES_GUC(dev_priv))
2415 		return -ENODEV;
2416 
2417 	i915_guc_log_info(m, dev_priv);
2418 
2419 	if (!USES_GUC_SUBMISSION(dev_priv))
2420 		return 0;
2421 
2422 	GEM_BUG_ON(!guc->execbuf_client);
2423 
2424 	seq_printf(m, "\nDoorbell map:\n");
2425 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2426 	seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2427 
2428 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2429 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2430 	if (guc->preempt_client) {
2431 		seq_printf(m, "\nGuC preempt client @ %p:\n",
2432 			   guc->preempt_client);
2433 		i915_guc_client_info(m, dev_priv, guc->preempt_client);
2434 	}
2435 
2436 	/* Add more as required ... */
2437 
2438 	return 0;
2439 }
2440 
2441 static int i915_guc_stage_pool(struct seq_file *m, void *data)
2442 {
2443 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2444 	const struct intel_guc *guc = &dev_priv->guc;
2445 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
2446 	struct intel_guc_client *client = guc->execbuf_client;
2447 	unsigned int tmp;
2448 	int index;
2449 
2450 	if (!USES_GUC_SUBMISSION(dev_priv))
2451 		return -ENODEV;
2452 
2453 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
2454 		struct intel_engine_cs *engine;
2455 
2456 		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
2457 			continue;
2458 
2459 		seq_printf(m, "GuC stage descriptor %u:\n", index);
2460 		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
2461 		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
2462 		seq_printf(m, "\tPriority: %d\n", desc->priority);
2463 		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
2464 		seq_printf(m, "\tEngines used: 0x%x\n",
2465 			   desc->engines_used);
2466 		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
2467 			   desc->db_trigger_phy,
2468 			   desc->db_trigger_cpu,
2469 			   desc->db_trigger_uk);
2470 		seq_printf(m, "\tProcess descriptor: 0x%x\n",
2471 			   desc->process_desc);
2472 		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
2473 			   desc->wq_addr, desc->wq_size);
2474 		seq_putc(m, '\n');
2475 
2476 		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
2477 			u32 guc_engine_id = engine->guc_id;
2478 			struct guc_execlist_context *lrc =
2479 						&desc->lrc[guc_engine_id];
2480 
2481 			seq_printf(m, "\t%s LRC:\n", engine->name);
2482 			seq_printf(m, "\t\tContext desc: 0x%x\n",
2483 				   lrc->context_desc);
2484 			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
2485 			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
2486 			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
2487 			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
2488 			seq_putc(m, '\n');
2489 		}
2490 	}
2491 
2492 	return 0;
2493 }
2494 
2495 static int i915_guc_log_dump(struct seq_file *m, void *data)
2496 {
2497 	struct drm_info_node *node = m->private;
2498 	struct drm_i915_private *dev_priv = node_to_i915(node);
2499 	bool dump_load_err = !!node->info_ent->data;
2500 	struct drm_i915_gem_object *obj = NULL;
2501 	u32 *log;
2502 	int i = 0;
2503 
2504 	if (!HAS_GUC(dev_priv))
2505 		return -ENODEV;
2506 
2507 	if (dump_load_err)
2508 		obj = dev_priv->guc.load_err_log;
2509 	else if (dev_priv->guc.log.vma)
2510 		obj = dev_priv->guc.log.vma->obj;
2511 
2512 	if (!obj)
2513 		return 0;
2514 
2515 	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2516 	if (IS_ERR(log)) {
2517 		DRM_DEBUG("Failed to pin object\n");
2518 		seq_puts(m, "(log data unaccessible)\n");
2519 		return PTR_ERR(log);
2520 	}
2521 
2522 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2523 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2524 			   *(log + i), *(log + i + 1),
2525 			   *(log + i + 2), *(log + i + 3));
2526 
2527 	seq_putc(m, '\n');
2528 
2529 	i915_gem_object_unpin_map(obj);
2530 
2531 	return 0;
2532 }
2533 
2534 static int i915_guc_log_level_get(void *data, u64 *val)
2535 {
2536 	struct drm_i915_private *dev_priv = data;
2537 
2538 	if (!USES_GUC(dev_priv))
2539 		return -ENODEV;
2540 
2541 	*val = intel_guc_log_get_level(&dev_priv->guc.log);
2542 
2543 	return 0;
2544 }
2545 
2546 static int i915_guc_log_level_set(void *data, u64 val)
2547 {
2548 	struct drm_i915_private *dev_priv = data;
2549 
2550 	if (!USES_GUC(dev_priv))
2551 		return -ENODEV;
2552 
2553 	return intel_guc_log_set_level(&dev_priv->guc.log, val);
2554 }
2555 
2556 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2557 			i915_guc_log_level_get, i915_guc_log_level_set,
2558 			"%lld\n");
2559 
2560 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2561 {
2562 	struct drm_i915_private *dev_priv = inode->i_private;
2563 
2564 	if (!USES_GUC(dev_priv))
2565 		return -ENODEV;
2566 
2567 	file->private_data = &dev_priv->guc.log;
2568 
2569 	return intel_guc_log_relay_open(&dev_priv->guc.log);
2570 }
2571 
2572 static ssize_t
2573 i915_guc_log_relay_write(struct file *filp,
2574 			 const char __user *ubuf,
2575 			 size_t cnt,
2576 			 loff_t *ppos)
2577 {
2578 	struct intel_guc_log *log = filp->private_data;
2579 
2580 	intel_guc_log_relay_flush(log);
2581 
2582 	return cnt;
2583 }
2584 
2585 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2586 {
2587 	struct drm_i915_private *dev_priv = inode->i_private;
2588 
2589 	intel_guc_log_relay_close(&dev_priv->guc.log);
2590 
2591 	return 0;
2592 }
2593 
2594 static const struct file_operations i915_guc_log_relay_fops = {
2595 	.owner = THIS_MODULE,
2596 	.open = i915_guc_log_relay_open,
2597 	.write = i915_guc_log_relay_write,
2598 	.release = i915_guc_log_relay_release,
2599 };
2600 
2601 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2602 {
2603 	u8 val;
2604 	static const char * const sink_status[] = {
2605 		"inactive",
2606 		"transition to active, capture and display",
2607 		"active, display from RFB",
2608 		"active, capture and display on sink device timings",
2609 		"transition to inactive, capture and display, timing re-sync",
2610 		"reserved",
2611 		"reserved",
2612 		"sink internal error",
2613 	};
2614 	struct drm_connector *connector = m->private;
2615 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2616 	struct intel_dp *intel_dp =
2617 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2618 	int ret;
2619 
2620 	if (!CAN_PSR(dev_priv)) {
2621 		seq_puts(m, "PSR Unsupported\n");
2622 		return -ENODEV;
2623 	}
2624 
2625 	if (connector->status != connector_status_connected)
2626 		return -ENODEV;
2627 
2628 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2629 
2630 	if (ret == 1) {
2631 		const char *str = "unknown";
2632 
2633 		val &= DP_PSR_SINK_STATE_MASK;
2634 		if (val < ARRAY_SIZE(sink_status))
2635 			str = sink_status[val];
2636 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2637 	} else {
2638 		return ret;
2639 	}
2640 
2641 	return 0;
2642 }
2643 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2644 
2645 static void
2646 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2647 {
2648 	u32 val, psr_status;
2649 
2650 	if (dev_priv->psr.psr2_enabled) {
2651 		static const char * const live_status[] = {
2652 			"IDLE",
2653 			"CAPTURE",
2654 			"CAPTURE_FS",
2655 			"SLEEP",
2656 			"BUFON_FW",
2657 			"ML_UP",
2658 			"SU_STANDBY",
2659 			"FAST_SLEEP",
2660 			"DEEP_SLEEP",
2661 			"BUF_ON",
2662 			"TG_ON"
2663 		};
2664 		psr_status = I915_READ(EDP_PSR2_STATUS);
2665 		val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2666 			EDP_PSR2_STATUS_STATE_SHIFT;
2667 		if (val < ARRAY_SIZE(live_status)) {
2668 			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2669 				   psr_status, live_status[val]);
2670 			return;
2671 		}
2672 	} else {
2673 		static const char * const live_status[] = {
2674 			"IDLE",
2675 			"SRDONACK",
2676 			"SRDENT",
2677 			"BUFOFF",
2678 			"BUFON",
2679 			"AUXACK",
2680 			"SRDOFFACK",
2681 			"SRDENT_ON",
2682 		};
2683 		psr_status = I915_READ(EDP_PSR_STATUS);
2684 		val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2685 			EDP_PSR_STATUS_STATE_SHIFT;
2686 		if (val < ARRAY_SIZE(live_status)) {
2687 			seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2688 				   psr_status, live_status[val]);
2689 			return;
2690 		}
2691 	}
2692 
2693 	seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
2694 }
2695 
2696 static int i915_edp_psr_status(struct seq_file *m, void *data)
2697 {
2698 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2699 	u32 psrperf = 0;
2700 	bool enabled = false;
2701 	bool sink_support;
2702 
2703 	if (!HAS_PSR(dev_priv))
2704 		return -ENODEV;
2705 
2706 	sink_support = dev_priv->psr.sink_support;
2707 	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
2708 	if (!sink_support)
2709 		return 0;
2710 
2711 	intel_runtime_pm_get(dev_priv);
2712 
2713 	mutex_lock(&dev_priv->psr.lock);
2714 	seq_printf(m, "PSR mode: %s\n",
2715 		   dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
2716 	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
2717 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
2718 		   dev_priv->psr.busy_frontbuffer_bits);
2719 
2720 	if (dev_priv->psr.psr2_enabled)
2721 		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
2722 	else
2723 		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
2724 
2725 	seq_printf(m, "Main link in standby mode: %s\n",
2726 		   yesno(dev_priv->psr.link_standby));
2727 
2728 	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
2729 
2730 	/*
2731 	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
2732 	 */
2733 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2734 		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
2735 			EDP_PSR_PERF_CNT_MASK;
2736 
2737 		seq_printf(m, "Performance_Counter: %u\n", psrperf);
2738 	}
2739 
2740 	psr_source_status(dev_priv, m);
2741 	mutex_unlock(&dev_priv->psr.lock);
2742 
2743 	if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
2744 		seq_printf(m, "Last attempted entry at: %lld\n",
2745 			   dev_priv->psr.last_entry_attempt);
2746 		seq_printf(m, "Last exit at: %lld\n",
2747 			   dev_priv->psr.last_exit);
2748 	}
2749 
2750 	intel_runtime_pm_put(dev_priv);
2751 	return 0;
2752 }
2753 
2754 static int
2755 i915_edp_psr_debug_set(void *data, u64 val)
2756 {
2757 	struct drm_i915_private *dev_priv = data;
2758 	struct drm_modeset_acquire_ctx ctx;
2759 	int ret;
2760 
2761 	if (!CAN_PSR(dev_priv))
2762 		return -ENODEV;
2763 
2764 	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2765 
2766 	intel_runtime_pm_get(dev_priv);
2767 
2768 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2769 
2770 retry:
2771 	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
2772 	if (ret == -EDEADLK) {
2773 		ret = drm_modeset_backoff(&ctx);
2774 		if (!ret)
2775 			goto retry;
2776 	}
2777 
2778 	drm_modeset_drop_locks(&ctx);
2779 	drm_modeset_acquire_fini(&ctx);
2780 
2781 	intel_runtime_pm_put(dev_priv);
2782 
2783 	return ret;
2784 }
2785 
2786 static int
2787 i915_edp_psr_debug_get(void *data, u64 *val)
2788 {
2789 	struct drm_i915_private *dev_priv = data;
2790 
2791 	if (!CAN_PSR(dev_priv))
2792 		return -ENODEV;
2793 
2794 	*val = READ_ONCE(dev_priv->psr.debug);
2795 	return 0;
2796 }
2797 
2798 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2799 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2800 			"%llu\n");
2801 
2802 static int i915_energy_uJ(struct seq_file *m, void *data)
2803 {
2804 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2805 	unsigned long long power;
2806 	u32 units;
2807 
2808 	if (INTEL_GEN(dev_priv) < 6)
2809 		return -ENODEV;
2810 
2811 	intel_runtime_pm_get(dev_priv);
2812 
2813 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
2814 		intel_runtime_pm_put(dev_priv);
2815 		return -ENODEV;
2816 	}
2817 
2818 	units = (power & 0x1f00) >> 8;
2819 	power = I915_READ(MCH_SECP_NRG_STTS);
2820 	power = (1000000 * power) >> units; /* convert to uJ */
2821 
2822 	intel_runtime_pm_put(dev_priv);
2823 
2824 	seq_printf(m, "%llu", power);
2825 
2826 	return 0;
2827 }
2828 
2829 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2830 {
2831 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2832 	struct pci_dev *pdev = dev_priv->drm.pdev;
2833 
2834 	if (!HAS_RUNTIME_PM(dev_priv))
2835 		seq_puts(m, "Runtime power management not supported\n");
2836 
2837 	seq_printf(m, "GPU idle: %s (epoch %u)\n",
2838 		   yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2839 	seq_printf(m, "IRQs disabled: %s\n",
2840 		   yesno(!intel_irqs_enabled(dev_priv)));
2841 #ifdef CONFIG_PM
2842 	seq_printf(m, "Usage count: %d\n",
2843 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2844 #else
2845 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2846 #endif
2847 	seq_printf(m, "PCI device power state: %s [%d]\n",
2848 		   pci_power_name(pdev->current_state),
2849 		   pdev->current_state);
2850 
2851 	return 0;
2852 }
2853 
2854 static int i915_power_domain_info(struct seq_file *m, void *unused)
2855 {
2856 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2857 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2858 	int i;
2859 
2860 	mutex_lock(&power_domains->lock);
2861 
2862 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2863 	for (i = 0; i < power_domains->power_well_count; i++) {
2864 		struct i915_power_well *power_well;
2865 		enum intel_display_power_domain power_domain;
2866 
2867 		power_well = &power_domains->power_wells[i];
2868 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
2869 			   power_well->count);
2870 
2871 		for_each_power_domain(power_domain, power_well->desc->domains)
2872 			seq_printf(m, "  %-23s %d\n",
2873 				 intel_display_power_domain_str(power_domain),
2874 				 power_domains->domain_use_count[power_domain]);
2875 	}
2876 
2877 	mutex_unlock(&power_domains->lock);
2878 
2879 	return 0;
2880 }
2881 
2882 static int i915_dmc_info(struct seq_file *m, void *unused)
2883 {
2884 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2885 	struct intel_csr *csr;
2886 
2887 	if (!HAS_CSR(dev_priv))
2888 		return -ENODEV;
2889 
2890 	csr = &dev_priv->csr;
2891 
2892 	intel_runtime_pm_get(dev_priv);
2893 
2894 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2895 	seq_printf(m, "path: %s\n", csr->fw_path);
2896 
2897 	if (!csr->dmc_payload)
2898 		goto out;
2899 
2900 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2901 		   CSR_VERSION_MINOR(csr->version));
2902 
2903 	if (IS_KABYLAKE(dev_priv) ||
2904 	    (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
2905 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2906 			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
2907 		seq_printf(m, "DC5 -> DC6 count: %d\n",
2908 			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
2909 	} else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
2910 		seq_printf(m, "DC3 -> DC5 count: %d\n",
2911 			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
2912 	}
2913 
2914 out:
2915 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2916 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2917 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2918 
2919 	intel_runtime_pm_put(dev_priv);
2920 
2921 	return 0;
2922 }
2923 
2924 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2925 				 struct drm_display_mode *mode)
2926 {
2927 	int i;
2928 
2929 	for (i = 0; i < tabs; i++)
2930 		seq_putc(m, '\t');
2931 
2932 	seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2933 		   mode->base.id, mode->name,
2934 		   mode->vrefresh, mode->clock,
2935 		   mode->hdisplay, mode->hsync_start,
2936 		   mode->hsync_end, mode->htotal,
2937 		   mode->vdisplay, mode->vsync_start,
2938 		   mode->vsync_end, mode->vtotal,
2939 		   mode->type, mode->flags);
2940 }
2941 
2942 static void intel_encoder_info(struct seq_file *m,
2943 			       struct intel_crtc *intel_crtc,
2944 			       struct intel_encoder *intel_encoder)
2945 {
2946 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2947 	struct drm_device *dev = &dev_priv->drm;
2948 	struct drm_crtc *crtc = &intel_crtc->base;
2949 	struct intel_connector *intel_connector;
2950 	struct drm_encoder *encoder;
2951 
2952 	encoder = &intel_encoder->base;
2953 	seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2954 		   encoder->base.id, encoder->name);
2955 	for_each_connector_on_encoder(dev, encoder, intel_connector) {
2956 		struct drm_connector *connector = &intel_connector->base;
2957 		seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2958 			   connector->base.id,
2959 			   connector->name,
2960 			   drm_get_connector_status_name(connector->status));
2961 		if (connector->status == connector_status_connected) {
2962 			struct drm_display_mode *mode = &crtc->mode;
2963 			seq_printf(m, ", mode:\n");
2964 			intel_seq_print_mode(m, 2, mode);
2965 		} else {
2966 			seq_putc(m, '\n');
2967 		}
2968 	}
2969 }
2970 
2971 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2972 {
2973 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2974 	struct drm_device *dev = &dev_priv->drm;
2975 	struct drm_crtc *crtc = &intel_crtc->base;
2976 	struct intel_encoder *intel_encoder;
2977 	struct drm_plane_state *plane_state = crtc->primary->state;
2978 	struct drm_framebuffer *fb = plane_state->fb;
2979 
2980 	if (fb)
2981 		seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2982 			   fb->base.id, plane_state->src_x >> 16,
2983 			   plane_state->src_y >> 16, fb->width, fb->height);
2984 	else
2985 		seq_puts(m, "\tprimary plane disabled\n");
2986 	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2987 		intel_encoder_info(m, intel_crtc, intel_encoder);
2988 }
2989 
2990 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2991 {
2992 	struct drm_display_mode *mode = panel->fixed_mode;
2993 
2994 	seq_printf(m, "\tfixed mode:\n");
2995 	intel_seq_print_mode(m, 2, mode);
2996 }
2997 
2998 static void intel_dp_info(struct seq_file *m,
2999 			  struct intel_connector *intel_connector)
3000 {
3001 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3002 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3003 
3004 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
3005 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
3006 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
3007 		intel_panel_info(m, &intel_connector->panel);
3008 
3009 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
3010 				&intel_dp->aux);
3011 }
3012 
3013 static void intel_dp_mst_info(struct seq_file *m,
3014 			  struct intel_connector *intel_connector)
3015 {
3016 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3017 	struct intel_dp_mst_encoder *intel_mst =
3018 		enc_to_mst(&intel_encoder->base);
3019 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
3020 	struct intel_dp *intel_dp = &intel_dig_port->dp;
3021 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3022 					intel_connector->port);
3023 
3024 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3025 }
3026 
3027 static void intel_hdmi_info(struct seq_file *m,
3028 			    struct intel_connector *intel_connector)
3029 {
3030 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3031 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3032 
3033 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
3034 }
3035 
3036 static void intel_lvds_info(struct seq_file *m,
3037 			    struct intel_connector *intel_connector)
3038 {
3039 	intel_panel_info(m, &intel_connector->panel);
3040 }
3041 
3042 static void intel_connector_info(struct seq_file *m,
3043 				 struct drm_connector *connector)
3044 {
3045 	struct intel_connector *intel_connector = to_intel_connector(connector);
3046 	struct intel_encoder *intel_encoder = intel_connector->encoder;
3047 	struct drm_display_mode *mode;
3048 
3049 	seq_printf(m, "connector %d: type %s, status: %s\n",
3050 		   connector->base.id, connector->name,
3051 		   drm_get_connector_status_name(connector->status));
3052 	if (connector->status == connector_status_connected) {
3053 		seq_printf(m, "\tname: %s\n", connector->display_info.name);
3054 		seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3055 			   connector->display_info.width_mm,
3056 			   connector->display_info.height_mm);
3057 		seq_printf(m, "\tsubpixel order: %s\n",
3058 			   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3059 		seq_printf(m, "\tCEA rev: %d\n",
3060 			   connector->display_info.cea_rev);
3061 	}
3062 
3063 	if (!intel_encoder)
3064 		return;
3065 
3066 	switch (connector->connector_type) {
3067 	case DRM_MODE_CONNECTOR_DisplayPort:
3068 	case DRM_MODE_CONNECTOR_eDP:
3069 		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3070 			intel_dp_mst_info(m, intel_connector);
3071 		else
3072 			intel_dp_info(m, intel_connector);
3073 		break;
3074 	case DRM_MODE_CONNECTOR_LVDS:
3075 		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3076 			intel_lvds_info(m, intel_connector);
3077 		break;
3078 	case DRM_MODE_CONNECTOR_HDMIA:
3079 		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3080 		    intel_encoder->type == INTEL_OUTPUT_DDI)
3081 			intel_hdmi_info(m, intel_connector);
3082 		break;
3083 	default:
3084 		break;
3085 	}
3086 
3087 	seq_printf(m, "\tmodes:\n");
3088 	list_for_each_entry(mode, &connector->modes, head)
3089 		intel_seq_print_mode(m, 2, mode);
3090 }
3091 
3092 static const char *plane_type(enum drm_plane_type type)
3093 {
3094 	switch (type) {
3095 	case DRM_PLANE_TYPE_OVERLAY:
3096 		return "OVL";
3097 	case DRM_PLANE_TYPE_PRIMARY:
3098 		return "PRI";
3099 	case DRM_PLANE_TYPE_CURSOR:
3100 		return "CUR";
3101 	/*
3102 	 * Deliberately omitting default: to generate compiler warnings
3103 	 * when a new drm_plane_type gets added.
3104 	 */
3105 	}
3106 
3107 	return "unknown";
3108 }
3109 
3110 static const char *plane_rotation(unsigned int rotation)
3111 {
3112 	static char buf[48];
3113 	/*
3114 	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
3115 	 * will print them all to visualize if the values are misused
3116 	 */
3117 	snprintf(buf, sizeof(buf),
3118 		 "%s%s%s%s%s%s(0x%08x)",
3119 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
3120 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
3121 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
3122 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
3123 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
3124 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
3125 		 rotation);
3126 
3127 	return buf;
3128 }
3129 
3130 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3131 {
3132 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3133 	struct drm_device *dev = &dev_priv->drm;
3134 	struct intel_plane *intel_plane;
3135 
3136 	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3137 		struct drm_plane_state *state;
3138 		struct drm_plane *plane = &intel_plane->base;
3139 		struct drm_format_name_buf format_name;
3140 
3141 		if (!plane->state) {
3142 			seq_puts(m, "plane->state is NULL!\n");
3143 			continue;
3144 		}
3145 
3146 		state = plane->state;
3147 
3148 		if (state->fb) {
3149 			drm_get_format_name(state->fb->format->format,
3150 					    &format_name);
3151 		} else {
3152 			sprintf(format_name.str, "N/A");
3153 		}
3154 
3155 		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3156 			   plane->base.id,
3157 			   plane_type(intel_plane->base.type),
3158 			   state->crtc_x, state->crtc_y,
3159 			   state->crtc_w, state->crtc_h,
3160 			   (state->src_x >> 16),
3161 			   ((state->src_x & 0xffff) * 15625) >> 10,
3162 			   (state->src_y >> 16),
3163 			   ((state->src_y & 0xffff) * 15625) >> 10,
3164 			   (state->src_w >> 16),
3165 			   ((state->src_w & 0xffff) * 15625) >> 10,
3166 			   (state->src_h >> 16),
3167 			   ((state->src_h & 0xffff) * 15625) >> 10,
3168 			   format_name.str,
3169 			   plane_rotation(state->rotation));
3170 	}
3171 }
3172 
3173 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3174 {
3175 	struct intel_crtc_state *pipe_config;
3176 	int num_scalers = intel_crtc->num_scalers;
3177 	int i;
3178 
3179 	pipe_config = to_intel_crtc_state(intel_crtc->base.state);
3180 
3181 	/* Not all platformas have a scaler */
3182 	if (num_scalers) {
3183 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
3184 			   num_scalers,
3185 			   pipe_config->scaler_state.scaler_users,
3186 			   pipe_config->scaler_state.scaler_id);
3187 
3188 		for (i = 0; i < num_scalers; i++) {
3189 			struct intel_scaler *sc =
3190 					&pipe_config->scaler_state.scalers[i];
3191 
3192 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3193 				   i, yesno(sc->in_use), sc->mode);
3194 		}
3195 		seq_puts(m, "\n");
3196 	} else {
3197 		seq_puts(m, "\tNo scalers available on this platform\n");
3198 	}
3199 }
3200 
3201 static int i915_display_info(struct seq_file *m, void *unused)
3202 {
3203 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3204 	struct drm_device *dev = &dev_priv->drm;
3205 	struct intel_crtc *crtc;
3206 	struct drm_connector *connector;
3207 	struct drm_connector_list_iter conn_iter;
3208 
3209 	intel_runtime_pm_get(dev_priv);
3210 	seq_printf(m, "CRTC info\n");
3211 	seq_printf(m, "---------\n");
3212 	for_each_intel_crtc(dev, crtc) {
3213 		struct intel_crtc_state *pipe_config;
3214 
3215 		drm_modeset_lock(&crtc->base.mutex, NULL);
3216 		pipe_config = to_intel_crtc_state(crtc->base.state);
3217 
3218 		seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3219 			   crtc->base.base.id, pipe_name(crtc->pipe),
3220 			   yesno(pipe_config->base.active),
3221 			   pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3222 			   yesno(pipe_config->dither), pipe_config->pipe_bpp);
3223 
3224 		if (pipe_config->base.active) {
3225 			struct intel_plane *cursor =
3226 				to_intel_plane(crtc->base.cursor);
3227 
3228 			intel_crtc_info(m, crtc);
3229 
3230 			seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3231 				   yesno(cursor->base.state->visible),
3232 				   cursor->base.state->crtc_x,
3233 				   cursor->base.state->crtc_y,
3234 				   cursor->base.state->crtc_w,
3235 				   cursor->base.state->crtc_h,
3236 				   cursor->cursor.base);
3237 			intel_scaler_info(m, crtc);
3238 			intel_plane_info(m, crtc);
3239 		}
3240 
3241 		seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3242 			   yesno(!crtc->cpu_fifo_underrun_disabled),
3243 			   yesno(!crtc->pch_fifo_underrun_disabled));
3244 		drm_modeset_unlock(&crtc->base.mutex);
3245 	}
3246 
3247 	seq_printf(m, "\n");
3248 	seq_printf(m, "Connector info\n");
3249 	seq_printf(m, "--------------\n");
3250 	mutex_lock(&dev->mode_config.mutex);
3251 	drm_connector_list_iter_begin(dev, &conn_iter);
3252 	drm_for_each_connector_iter(connector, &conn_iter)
3253 		intel_connector_info(m, connector);
3254 	drm_connector_list_iter_end(&conn_iter);
3255 	mutex_unlock(&dev->mode_config.mutex);
3256 
3257 	intel_runtime_pm_put(dev_priv);
3258 
3259 	return 0;
3260 }
3261 
3262 static int i915_engine_info(struct seq_file *m, void *unused)
3263 {
3264 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3265 	struct intel_engine_cs *engine;
3266 	enum intel_engine_id id;
3267 	struct drm_printer p;
3268 
3269 	intel_runtime_pm_get(dev_priv);
3270 
3271 	seq_printf(m, "GT awake? %s (epoch %u)\n",
3272 		   yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3273 	seq_printf(m, "Global active requests: %d\n",
3274 		   dev_priv->gt.active_requests);
3275 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
3276 		   dev_priv->info.cs_timestamp_frequency_khz);
3277 
3278 	p = drm_seq_file_printer(m);
3279 	for_each_engine(engine, dev_priv, id)
3280 		intel_engine_dump(engine, &p, "%s\n", engine->name);
3281 
3282 	intel_runtime_pm_put(dev_priv);
3283 
3284 	return 0;
3285 }
3286 
3287 static int i915_rcs_topology(struct seq_file *m, void *unused)
3288 {
3289 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3290 	struct drm_printer p = drm_seq_file_printer(m);
3291 
3292 	intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3293 
3294 	return 0;
3295 }
3296 
3297 static int i915_shrinker_info(struct seq_file *m, void *unused)
3298 {
3299 	struct drm_i915_private *i915 = node_to_i915(m->private);
3300 
3301 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3302 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3303 
3304 	return 0;
3305 }
3306 
3307 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3308 {
3309 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3310 	struct drm_device *dev = &dev_priv->drm;
3311 	int i;
3312 
3313 	drm_modeset_lock_all(dev);
3314 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3315 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3316 
3317 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3318 			   pll->info->id);
3319 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3320 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3321 		seq_printf(m, " tracked hardware state:\n");
3322 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3323 		seq_printf(m, " dpll_md: 0x%08x\n",
3324 			   pll->state.hw_state.dpll_md);
3325 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3326 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3327 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3328 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3329 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3330 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3331 			   pll->state.hw_state.mg_refclkin_ctl);
3332 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3333 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
3334 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3335 			   pll->state.hw_state.mg_clktop2_hsclkctl);
3336 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
3337 			   pll->state.hw_state.mg_pll_div0);
3338 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
3339 			   pll->state.hw_state.mg_pll_div1);
3340 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
3341 			   pll->state.hw_state.mg_pll_lf);
3342 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3343 			   pll->state.hw_state.mg_pll_frac_lock);
3344 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3345 			   pll->state.hw_state.mg_pll_ssc);
3346 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
3347 			   pll->state.hw_state.mg_pll_bias);
3348 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3349 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
3350 	}
3351 	drm_modeset_unlock_all(dev);
3352 
3353 	return 0;
3354 }
3355 
3356 static int i915_wa_registers(struct seq_file *m, void *unused)
3357 {
3358 	struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
3359 	int i;
3360 
3361 	seq_printf(m, "Workarounds applied: %d\n", wa->count);
3362 	for (i = 0; i < wa->count; ++i)
3363 		seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3364 			   wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
3365 
3366 	return 0;
3367 }
3368 
3369 static int i915_ipc_status_show(struct seq_file *m, void *data)
3370 {
3371 	struct drm_i915_private *dev_priv = m->private;
3372 
3373 	seq_printf(m, "Isochronous Priority Control: %s\n",
3374 			yesno(dev_priv->ipc_enabled));
3375 	return 0;
3376 }
3377 
3378 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3379 {
3380 	struct drm_i915_private *dev_priv = inode->i_private;
3381 
3382 	if (!HAS_IPC(dev_priv))
3383 		return -ENODEV;
3384 
3385 	return single_open(file, i915_ipc_status_show, dev_priv);
3386 }
3387 
3388 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3389 				     size_t len, loff_t *offp)
3390 {
3391 	struct seq_file *m = file->private_data;
3392 	struct drm_i915_private *dev_priv = m->private;
3393 	int ret;
3394 	bool enable;
3395 
3396 	ret = kstrtobool_from_user(ubuf, len, &enable);
3397 	if (ret < 0)
3398 		return ret;
3399 
3400 	intel_runtime_pm_get(dev_priv);
3401 	if (!dev_priv->ipc_enabled && enable)
3402 		DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3403 	dev_priv->wm.distrust_bios_wm = true;
3404 	dev_priv->ipc_enabled = enable;
3405 	intel_enable_ipc(dev_priv);
3406 	intel_runtime_pm_put(dev_priv);
3407 
3408 	return len;
3409 }
3410 
3411 static const struct file_operations i915_ipc_status_fops = {
3412 	.owner = THIS_MODULE,
3413 	.open = i915_ipc_status_open,
3414 	.read = seq_read,
3415 	.llseek = seq_lseek,
3416 	.release = single_release,
3417 	.write = i915_ipc_status_write
3418 };
3419 
3420 static int i915_ddb_info(struct seq_file *m, void *unused)
3421 {
3422 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3423 	struct drm_device *dev = &dev_priv->drm;
3424 	struct skl_ddb_allocation *ddb;
3425 	struct skl_ddb_entry *entry;
3426 	enum pipe pipe;
3427 	int plane;
3428 
3429 	if (INTEL_GEN(dev_priv) < 9)
3430 		return -ENODEV;
3431 
3432 	drm_modeset_lock_all(dev);
3433 
3434 	ddb = &dev_priv->wm.skl_hw.ddb;
3435 
3436 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3437 
3438 	for_each_pipe(dev_priv, pipe) {
3439 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3440 
3441 		for_each_universal_plane(dev_priv, pipe, plane) {
3442 			entry = &ddb->plane[pipe][plane];
3443 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3444 				   entry->start, entry->end,
3445 				   skl_ddb_entry_size(entry));
3446 		}
3447 
3448 		entry = &ddb->plane[pipe][PLANE_CURSOR];
3449 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3450 			   entry->end, skl_ddb_entry_size(entry));
3451 	}
3452 
3453 	drm_modeset_unlock_all(dev);
3454 
3455 	return 0;
3456 }
3457 
3458 static void drrs_status_per_crtc(struct seq_file *m,
3459 				 struct drm_device *dev,
3460 				 struct intel_crtc *intel_crtc)
3461 {
3462 	struct drm_i915_private *dev_priv = to_i915(dev);
3463 	struct i915_drrs *drrs = &dev_priv->drrs;
3464 	int vrefresh = 0;
3465 	struct drm_connector *connector;
3466 	struct drm_connector_list_iter conn_iter;
3467 
3468 	drm_connector_list_iter_begin(dev, &conn_iter);
3469 	drm_for_each_connector_iter(connector, &conn_iter) {
3470 		if (connector->state->crtc != &intel_crtc->base)
3471 			continue;
3472 
3473 		seq_printf(m, "%s:\n", connector->name);
3474 	}
3475 	drm_connector_list_iter_end(&conn_iter);
3476 
3477 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3478 		seq_puts(m, "\tVBT: DRRS_type: Static");
3479 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3480 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
3481 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3482 		seq_puts(m, "\tVBT: DRRS_type: None");
3483 	else
3484 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3485 
3486 	seq_puts(m, "\n\n");
3487 
3488 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3489 		struct intel_panel *panel;
3490 
3491 		mutex_lock(&drrs->mutex);
3492 		/* DRRS Supported */
3493 		seq_puts(m, "\tDRRS Supported: Yes\n");
3494 
3495 		/* disable_drrs() will make drrs->dp NULL */
3496 		if (!drrs->dp) {
3497 			seq_puts(m, "Idleness DRRS: Disabled\n");
3498 			if (dev_priv->psr.enabled)
3499 				seq_puts(m,
3500 				"\tAs PSR is enabled, DRRS is not enabled\n");
3501 			mutex_unlock(&drrs->mutex);
3502 			return;
3503 		}
3504 
3505 		panel = &drrs->dp->attached_connector->panel;
3506 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3507 					drrs->busy_frontbuffer_bits);
3508 
3509 		seq_puts(m, "\n\t\t");
3510 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3511 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3512 			vrefresh = panel->fixed_mode->vrefresh;
3513 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3514 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3515 			vrefresh = panel->downclock_mode->vrefresh;
3516 		} else {
3517 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
3518 						drrs->refresh_rate_type);
3519 			mutex_unlock(&drrs->mutex);
3520 			return;
3521 		}
3522 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3523 
3524 		seq_puts(m, "\n\t\t");
3525 		mutex_unlock(&drrs->mutex);
3526 	} else {
3527 		/* DRRS not supported. Print the VBT parameter*/
3528 		seq_puts(m, "\tDRRS Supported : No");
3529 	}
3530 	seq_puts(m, "\n");
3531 }
3532 
3533 static int i915_drrs_status(struct seq_file *m, void *unused)
3534 {
3535 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3536 	struct drm_device *dev = &dev_priv->drm;
3537 	struct intel_crtc *intel_crtc;
3538 	int active_crtc_cnt = 0;
3539 
3540 	drm_modeset_lock_all(dev);
3541 	for_each_intel_crtc(dev, intel_crtc) {
3542 		if (intel_crtc->base.state->active) {
3543 			active_crtc_cnt++;
3544 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3545 
3546 			drrs_status_per_crtc(m, dev, intel_crtc);
3547 		}
3548 	}
3549 	drm_modeset_unlock_all(dev);
3550 
3551 	if (!active_crtc_cnt)
3552 		seq_puts(m, "No active crtc found\n");
3553 
3554 	return 0;
3555 }
3556 
3557 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3558 {
3559 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3560 	struct drm_device *dev = &dev_priv->drm;
3561 	struct intel_encoder *intel_encoder;
3562 	struct intel_digital_port *intel_dig_port;
3563 	struct drm_connector *connector;
3564 	struct drm_connector_list_iter conn_iter;
3565 
3566 	drm_connector_list_iter_begin(dev, &conn_iter);
3567 	drm_for_each_connector_iter(connector, &conn_iter) {
3568 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3569 			continue;
3570 
3571 		intel_encoder = intel_attached_encoder(connector);
3572 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3573 			continue;
3574 
3575 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3576 		if (!intel_dig_port->dp.can_mst)
3577 			continue;
3578 
3579 		seq_printf(m, "MST Source Port %c\n",
3580 			   port_name(intel_dig_port->base.port));
3581 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3582 	}
3583 	drm_connector_list_iter_end(&conn_iter);
3584 
3585 	return 0;
3586 }
3587 
3588 static ssize_t i915_displayport_test_active_write(struct file *file,
3589 						  const char __user *ubuf,
3590 						  size_t len, loff_t *offp)
3591 {
3592 	char *input_buffer;
3593 	int status = 0;
3594 	struct drm_device *dev;
3595 	struct drm_connector *connector;
3596 	struct drm_connector_list_iter conn_iter;
3597 	struct intel_dp *intel_dp;
3598 	int val = 0;
3599 
3600 	dev = ((struct seq_file *)file->private_data)->private;
3601 
3602 	if (len == 0)
3603 		return 0;
3604 
3605 	input_buffer = memdup_user_nul(ubuf, len);
3606 	if (IS_ERR(input_buffer))
3607 		return PTR_ERR(input_buffer);
3608 
3609 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3610 
3611 	drm_connector_list_iter_begin(dev, &conn_iter);
3612 	drm_for_each_connector_iter(connector, &conn_iter) {
3613 		struct intel_encoder *encoder;
3614 
3615 		if (connector->connector_type !=
3616 		    DRM_MODE_CONNECTOR_DisplayPort)
3617 			continue;
3618 
3619 		encoder = to_intel_encoder(connector->encoder);
3620 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3621 			continue;
3622 
3623 		if (encoder && connector->status == connector_status_connected) {
3624 			intel_dp = enc_to_intel_dp(&encoder->base);
3625 			status = kstrtoint(input_buffer, 10, &val);
3626 			if (status < 0)
3627 				break;
3628 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3629 			/* To prevent erroneous activation of the compliance
3630 			 * testing code, only accept an actual value of 1 here
3631 			 */
3632 			if (val == 1)
3633 				intel_dp->compliance.test_active = 1;
3634 			else
3635 				intel_dp->compliance.test_active = 0;
3636 		}
3637 	}
3638 	drm_connector_list_iter_end(&conn_iter);
3639 	kfree(input_buffer);
3640 	if (status < 0)
3641 		return status;
3642 
3643 	*offp += len;
3644 	return len;
3645 }
3646 
3647 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3648 {
3649 	struct drm_i915_private *dev_priv = m->private;
3650 	struct drm_device *dev = &dev_priv->drm;
3651 	struct drm_connector *connector;
3652 	struct drm_connector_list_iter conn_iter;
3653 	struct intel_dp *intel_dp;
3654 
3655 	drm_connector_list_iter_begin(dev, &conn_iter);
3656 	drm_for_each_connector_iter(connector, &conn_iter) {
3657 		struct intel_encoder *encoder;
3658 
3659 		if (connector->connector_type !=
3660 		    DRM_MODE_CONNECTOR_DisplayPort)
3661 			continue;
3662 
3663 		encoder = to_intel_encoder(connector->encoder);
3664 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3665 			continue;
3666 
3667 		if (encoder && connector->status == connector_status_connected) {
3668 			intel_dp = enc_to_intel_dp(&encoder->base);
3669 			if (intel_dp->compliance.test_active)
3670 				seq_puts(m, "1");
3671 			else
3672 				seq_puts(m, "0");
3673 		} else
3674 			seq_puts(m, "0");
3675 	}
3676 	drm_connector_list_iter_end(&conn_iter);
3677 
3678 	return 0;
3679 }
3680 
3681 static int i915_displayport_test_active_open(struct inode *inode,
3682 					     struct file *file)
3683 {
3684 	return single_open(file, i915_displayport_test_active_show,
3685 			   inode->i_private);
3686 }
3687 
3688 static const struct file_operations i915_displayport_test_active_fops = {
3689 	.owner = THIS_MODULE,
3690 	.open = i915_displayport_test_active_open,
3691 	.read = seq_read,
3692 	.llseek = seq_lseek,
3693 	.release = single_release,
3694 	.write = i915_displayport_test_active_write
3695 };
3696 
3697 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3698 {
3699 	struct drm_i915_private *dev_priv = m->private;
3700 	struct drm_device *dev = &dev_priv->drm;
3701 	struct drm_connector *connector;
3702 	struct drm_connector_list_iter conn_iter;
3703 	struct intel_dp *intel_dp;
3704 
3705 	drm_connector_list_iter_begin(dev, &conn_iter);
3706 	drm_for_each_connector_iter(connector, &conn_iter) {
3707 		struct intel_encoder *encoder;
3708 
3709 		if (connector->connector_type !=
3710 		    DRM_MODE_CONNECTOR_DisplayPort)
3711 			continue;
3712 
3713 		encoder = to_intel_encoder(connector->encoder);
3714 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3715 			continue;
3716 
3717 		if (encoder && connector->status == connector_status_connected) {
3718 			intel_dp = enc_to_intel_dp(&encoder->base);
3719 			if (intel_dp->compliance.test_type ==
3720 			    DP_TEST_LINK_EDID_READ)
3721 				seq_printf(m, "%lx",
3722 					   intel_dp->compliance.test_data.edid);
3723 			else if (intel_dp->compliance.test_type ==
3724 				 DP_TEST_LINK_VIDEO_PATTERN) {
3725 				seq_printf(m, "hdisplay: %d\n",
3726 					   intel_dp->compliance.test_data.hdisplay);
3727 				seq_printf(m, "vdisplay: %d\n",
3728 					   intel_dp->compliance.test_data.vdisplay);
3729 				seq_printf(m, "bpc: %u\n",
3730 					   intel_dp->compliance.test_data.bpc);
3731 			}
3732 		} else
3733 			seq_puts(m, "0");
3734 	}
3735 	drm_connector_list_iter_end(&conn_iter);
3736 
3737 	return 0;
3738 }
3739 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3740 
3741 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3742 {
3743 	struct drm_i915_private *dev_priv = m->private;
3744 	struct drm_device *dev = &dev_priv->drm;
3745 	struct drm_connector *connector;
3746 	struct drm_connector_list_iter conn_iter;
3747 	struct intel_dp *intel_dp;
3748 
3749 	drm_connector_list_iter_begin(dev, &conn_iter);
3750 	drm_for_each_connector_iter(connector, &conn_iter) {
3751 		struct intel_encoder *encoder;
3752 
3753 		if (connector->connector_type !=
3754 		    DRM_MODE_CONNECTOR_DisplayPort)
3755 			continue;
3756 
3757 		encoder = to_intel_encoder(connector->encoder);
3758 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3759 			continue;
3760 
3761 		if (encoder && connector->status == connector_status_connected) {
3762 			intel_dp = enc_to_intel_dp(&encoder->base);
3763 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3764 		} else
3765 			seq_puts(m, "0");
3766 	}
3767 	drm_connector_list_iter_end(&conn_iter);
3768 
3769 	return 0;
3770 }
3771 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3772 
3773 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3774 {
3775 	struct drm_i915_private *dev_priv = m->private;
3776 	struct drm_device *dev = &dev_priv->drm;
3777 	int level;
3778 	int num_levels;
3779 
3780 	if (IS_CHERRYVIEW(dev_priv))
3781 		num_levels = 3;
3782 	else if (IS_VALLEYVIEW(dev_priv))
3783 		num_levels = 1;
3784 	else if (IS_G4X(dev_priv))
3785 		num_levels = 3;
3786 	else
3787 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3788 
3789 	drm_modeset_lock_all(dev);
3790 
3791 	for (level = 0; level < num_levels; level++) {
3792 		unsigned int latency = wm[level];
3793 
3794 		/*
3795 		 * - WM1+ latency values in 0.5us units
3796 		 * - latencies are in us on gen9/vlv/chv
3797 		 */
3798 		if (INTEL_GEN(dev_priv) >= 9 ||
3799 		    IS_VALLEYVIEW(dev_priv) ||
3800 		    IS_CHERRYVIEW(dev_priv) ||
3801 		    IS_G4X(dev_priv))
3802 			latency *= 10;
3803 		else if (level > 0)
3804 			latency *= 5;
3805 
3806 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3807 			   level, wm[level], latency / 10, latency % 10);
3808 	}
3809 
3810 	drm_modeset_unlock_all(dev);
3811 }
3812 
3813 static int pri_wm_latency_show(struct seq_file *m, void *data)
3814 {
3815 	struct drm_i915_private *dev_priv = m->private;
3816 	const uint16_t *latencies;
3817 
3818 	if (INTEL_GEN(dev_priv) >= 9)
3819 		latencies = dev_priv->wm.skl_latency;
3820 	else
3821 		latencies = dev_priv->wm.pri_latency;
3822 
3823 	wm_latency_show(m, latencies);
3824 
3825 	return 0;
3826 }
3827 
3828 static int spr_wm_latency_show(struct seq_file *m, void *data)
3829 {
3830 	struct drm_i915_private *dev_priv = m->private;
3831 	const uint16_t *latencies;
3832 
3833 	if (INTEL_GEN(dev_priv) >= 9)
3834 		latencies = dev_priv->wm.skl_latency;
3835 	else
3836 		latencies = dev_priv->wm.spr_latency;
3837 
3838 	wm_latency_show(m, latencies);
3839 
3840 	return 0;
3841 }
3842 
3843 static int cur_wm_latency_show(struct seq_file *m, void *data)
3844 {
3845 	struct drm_i915_private *dev_priv = m->private;
3846 	const uint16_t *latencies;
3847 
3848 	if (INTEL_GEN(dev_priv) >= 9)
3849 		latencies = dev_priv->wm.skl_latency;
3850 	else
3851 		latencies = dev_priv->wm.cur_latency;
3852 
3853 	wm_latency_show(m, latencies);
3854 
3855 	return 0;
3856 }
3857 
3858 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3859 {
3860 	struct drm_i915_private *dev_priv = inode->i_private;
3861 
3862 	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3863 		return -ENODEV;
3864 
3865 	return single_open(file, pri_wm_latency_show, dev_priv);
3866 }
3867 
3868 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3869 {
3870 	struct drm_i915_private *dev_priv = inode->i_private;
3871 
3872 	if (HAS_GMCH_DISPLAY(dev_priv))
3873 		return -ENODEV;
3874 
3875 	return single_open(file, spr_wm_latency_show, dev_priv);
3876 }
3877 
3878 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3879 {
3880 	struct drm_i915_private *dev_priv = inode->i_private;
3881 
3882 	if (HAS_GMCH_DISPLAY(dev_priv))
3883 		return -ENODEV;
3884 
3885 	return single_open(file, cur_wm_latency_show, dev_priv);
3886 }
3887 
3888 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3889 				size_t len, loff_t *offp, uint16_t wm[8])
3890 {
3891 	struct seq_file *m = file->private_data;
3892 	struct drm_i915_private *dev_priv = m->private;
3893 	struct drm_device *dev = &dev_priv->drm;
3894 	uint16_t new[8] = { 0 };
3895 	int num_levels;
3896 	int level;
3897 	int ret;
3898 	char tmp[32];
3899 
3900 	if (IS_CHERRYVIEW(dev_priv))
3901 		num_levels = 3;
3902 	else if (IS_VALLEYVIEW(dev_priv))
3903 		num_levels = 1;
3904 	else if (IS_G4X(dev_priv))
3905 		num_levels = 3;
3906 	else
3907 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3908 
3909 	if (len >= sizeof(tmp))
3910 		return -EINVAL;
3911 
3912 	if (copy_from_user(tmp, ubuf, len))
3913 		return -EFAULT;
3914 
3915 	tmp[len] = '\0';
3916 
3917 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3918 		     &new[0], &new[1], &new[2], &new[3],
3919 		     &new[4], &new[5], &new[6], &new[7]);
3920 	if (ret != num_levels)
3921 		return -EINVAL;
3922 
3923 	drm_modeset_lock_all(dev);
3924 
3925 	for (level = 0; level < num_levels; level++)
3926 		wm[level] = new[level];
3927 
3928 	drm_modeset_unlock_all(dev);
3929 
3930 	return len;
3931 }
3932 
3933 
3934 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3935 				    size_t len, loff_t *offp)
3936 {
3937 	struct seq_file *m = file->private_data;
3938 	struct drm_i915_private *dev_priv = m->private;
3939 	uint16_t *latencies;
3940 
3941 	if (INTEL_GEN(dev_priv) >= 9)
3942 		latencies = dev_priv->wm.skl_latency;
3943 	else
3944 		latencies = dev_priv->wm.pri_latency;
3945 
3946 	return wm_latency_write(file, ubuf, len, offp, latencies);
3947 }
3948 
3949 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3950 				    size_t len, loff_t *offp)
3951 {
3952 	struct seq_file *m = file->private_data;
3953 	struct drm_i915_private *dev_priv = m->private;
3954 	uint16_t *latencies;
3955 
3956 	if (INTEL_GEN(dev_priv) >= 9)
3957 		latencies = dev_priv->wm.skl_latency;
3958 	else
3959 		latencies = dev_priv->wm.spr_latency;
3960 
3961 	return wm_latency_write(file, ubuf, len, offp, latencies);
3962 }
3963 
3964 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3965 				    size_t len, loff_t *offp)
3966 {
3967 	struct seq_file *m = file->private_data;
3968 	struct drm_i915_private *dev_priv = m->private;
3969 	uint16_t *latencies;
3970 
3971 	if (INTEL_GEN(dev_priv) >= 9)
3972 		latencies = dev_priv->wm.skl_latency;
3973 	else
3974 		latencies = dev_priv->wm.cur_latency;
3975 
3976 	return wm_latency_write(file, ubuf, len, offp, latencies);
3977 }
3978 
3979 static const struct file_operations i915_pri_wm_latency_fops = {
3980 	.owner = THIS_MODULE,
3981 	.open = pri_wm_latency_open,
3982 	.read = seq_read,
3983 	.llseek = seq_lseek,
3984 	.release = single_release,
3985 	.write = pri_wm_latency_write
3986 };
3987 
3988 static const struct file_operations i915_spr_wm_latency_fops = {
3989 	.owner = THIS_MODULE,
3990 	.open = spr_wm_latency_open,
3991 	.read = seq_read,
3992 	.llseek = seq_lseek,
3993 	.release = single_release,
3994 	.write = spr_wm_latency_write
3995 };
3996 
3997 static const struct file_operations i915_cur_wm_latency_fops = {
3998 	.owner = THIS_MODULE,
3999 	.open = cur_wm_latency_open,
4000 	.read = seq_read,
4001 	.llseek = seq_lseek,
4002 	.release = single_release,
4003 	.write = cur_wm_latency_write
4004 };
4005 
4006 static int
4007 i915_wedged_get(void *data, u64 *val)
4008 {
4009 	struct drm_i915_private *dev_priv = data;
4010 
4011 	*val = i915_terminally_wedged(&dev_priv->gpu_error);
4012 
4013 	return 0;
4014 }
4015 
4016 static int
4017 i915_wedged_set(void *data, u64 val)
4018 {
4019 	struct drm_i915_private *i915 = data;
4020 	struct intel_engine_cs *engine;
4021 	unsigned int tmp;
4022 
4023 	/*
4024 	 * There is no safeguard against this debugfs entry colliding
4025 	 * with the hangcheck calling same i915_handle_error() in
4026 	 * parallel, causing an explosion. For now we assume that the
4027 	 * test harness is responsible enough not to inject gpu hangs
4028 	 * while it is writing to 'i915_wedged'
4029 	 */
4030 
4031 	if (i915_reset_backoff(&i915->gpu_error))
4032 		return -EAGAIN;
4033 
4034 	for_each_engine_masked(engine, i915, val, tmp) {
4035 		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
4036 		engine->hangcheck.stalled = true;
4037 	}
4038 
4039 	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
4040 			  "Manually set wedged engine mask = %llx", val);
4041 
4042 	wait_on_bit(&i915->gpu_error.flags,
4043 		    I915_RESET_HANDOFF,
4044 		    TASK_UNINTERRUPTIBLE);
4045 
4046 	return 0;
4047 }
4048 
4049 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4050 			i915_wedged_get, i915_wedged_set,
4051 			"%llu\n");
4052 
4053 static int
4054 fault_irq_set(struct drm_i915_private *i915,
4055 	      unsigned long *irq,
4056 	      unsigned long val)
4057 {
4058 	int err;
4059 
4060 	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
4061 	if (err)
4062 		return err;
4063 
4064 	err = i915_gem_wait_for_idle(i915,
4065 				     I915_WAIT_LOCKED |
4066 				     I915_WAIT_INTERRUPTIBLE,
4067 				     MAX_SCHEDULE_TIMEOUT);
4068 	if (err)
4069 		goto err_unlock;
4070 
4071 	*irq = val;
4072 	mutex_unlock(&i915->drm.struct_mutex);
4073 
4074 	/* Flush idle worker to disarm irq */
4075 	drain_delayed_work(&i915->gt.idle_work);
4076 
4077 	return 0;
4078 
4079 err_unlock:
4080 	mutex_unlock(&i915->drm.struct_mutex);
4081 	return err;
4082 }
4083 
4084 static int
4085 i915_ring_missed_irq_get(void *data, u64 *val)
4086 {
4087 	struct drm_i915_private *dev_priv = data;
4088 
4089 	*val = dev_priv->gpu_error.missed_irq_rings;
4090 	return 0;
4091 }
4092 
4093 static int
4094 i915_ring_missed_irq_set(void *data, u64 val)
4095 {
4096 	struct drm_i915_private *i915 = data;
4097 
4098 	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
4099 }
4100 
4101 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4102 			i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4103 			"0x%08llx\n");
4104 
4105 static int
4106 i915_ring_test_irq_get(void *data, u64 *val)
4107 {
4108 	struct drm_i915_private *dev_priv = data;
4109 
4110 	*val = dev_priv->gpu_error.test_irq_rings;
4111 
4112 	return 0;
4113 }
4114 
4115 static int
4116 i915_ring_test_irq_set(void *data, u64 val)
4117 {
4118 	struct drm_i915_private *i915 = data;
4119 
4120 	/* GuC keeps the user interrupt permanently enabled for submission */
4121 	if (USES_GUC_SUBMISSION(i915))
4122 		return -ENODEV;
4123 
4124 	/*
4125 	 * From icl, we can no longer individually mask interrupt generation
4126 	 * from each engine.
4127 	 */
4128 	if (INTEL_GEN(i915) >= 11)
4129 		return -ENODEV;
4130 
4131 	val &= INTEL_INFO(i915)->ring_mask;
4132 	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4133 
4134 	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
4135 }
4136 
4137 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4138 			i915_ring_test_irq_get, i915_ring_test_irq_set,
4139 			"0x%08llx\n");
4140 
4141 #define DROP_UNBOUND	BIT(0)
4142 #define DROP_BOUND	BIT(1)
4143 #define DROP_RETIRE	BIT(2)
4144 #define DROP_ACTIVE	BIT(3)
4145 #define DROP_FREED	BIT(4)
4146 #define DROP_SHRINK_ALL	BIT(5)
4147 #define DROP_IDLE	BIT(6)
4148 #define DROP_RESET_ACTIVE	BIT(7)
4149 #define DROP_RESET_SEQNO	BIT(8)
4150 #define DROP_ALL (DROP_UNBOUND	| \
4151 		  DROP_BOUND	| \
4152 		  DROP_RETIRE	| \
4153 		  DROP_ACTIVE	| \
4154 		  DROP_FREED	| \
4155 		  DROP_SHRINK_ALL |\
4156 		  DROP_IDLE	| \
4157 		  DROP_RESET_ACTIVE | \
4158 		  DROP_RESET_SEQNO)
4159 static int
4160 i915_drop_caches_get(void *data, u64 *val)
4161 {
4162 	*val = DROP_ALL;
4163 
4164 	return 0;
4165 }
4166 
4167 static int
4168 i915_drop_caches_set(void *data, u64 val)
4169 {
4170 	struct drm_i915_private *i915 = data;
4171 	int ret = 0;
4172 
4173 	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
4174 		  val, val & DROP_ALL);
4175 
4176 	if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
4177 		i915_gem_set_wedged(i915);
4178 
4179 	/* No need to check and wait for gpu resets, only libdrm auto-restarts
4180 	 * on ioctls on -EAGAIN. */
4181 	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
4182 		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
4183 		if (ret)
4184 			return ret;
4185 
4186 		if (val & DROP_ACTIVE)
4187 			ret = i915_gem_wait_for_idle(i915,
4188 						     I915_WAIT_INTERRUPTIBLE |
4189 						     I915_WAIT_LOCKED,
4190 						     MAX_SCHEDULE_TIMEOUT);
4191 
4192 		if (ret == 0 && val & DROP_RESET_SEQNO) {
4193 			intel_runtime_pm_get(i915);
4194 			ret = i915_gem_set_global_seqno(&i915->drm, 1);
4195 			intel_runtime_pm_put(i915);
4196 		}
4197 
4198 		if (val & DROP_RETIRE)
4199 			i915_retire_requests(i915);
4200 
4201 		mutex_unlock(&i915->drm.struct_mutex);
4202 	}
4203 
4204 	if (val & DROP_RESET_ACTIVE &&
4205 	    i915_terminally_wedged(&i915->gpu_error)) {
4206 		i915_handle_error(i915, ALL_ENGINES, 0, NULL);
4207 		wait_on_bit(&i915->gpu_error.flags,
4208 			    I915_RESET_HANDOFF,
4209 			    TASK_UNINTERRUPTIBLE);
4210 	}
4211 
4212 	fs_reclaim_acquire(GFP_KERNEL);
4213 	if (val & DROP_BOUND)
4214 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
4215 
4216 	if (val & DROP_UNBOUND)
4217 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
4218 
4219 	if (val & DROP_SHRINK_ALL)
4220 		i915_gem_shrink_all(i915);
4221 	fs_reclaim_release(GFP_KERNEL);
4222 
4223 	if (val & DROP_IDLE) {
4224 		do {
4225 			if (READ_ONCE(i915->gt.active_requests))
4226 				flush_delayed_work(&i915->gt.retire_work);
4227 			drain_delayed_work(&i915->gt.idle_work);
4228 		} while (READ_ONCE(i915->gt.awake));
4229 	}
4230 
4231 	if (val & DROP_FREED)
4232 		i915_gem_drain_freed_objects(i915);
4233 
4234 	return ret;
4235 }
4236 
4237 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4238 			i915_drop_caches_get, i915_drop_caches_set,
4239 			"0x%08llx\n");
4240 
4241 static int
4242 i915_cache_sharing_get(void *data, u64 *val)
4243 {
4244 	struct drm_i915_private *dev_priv = data;
4245 	u32 snpcr;
4246 
4247 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4248 		return -ENODEV;
4249 
4250 	intel_runtime_pm_get(dev_priv);
4251 
4252 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4253 
4254 	intel_runtime_pm_put(dev_priv);
4255 
4256 	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4257 
4258 	return 0;
4259 }
4260 
4261 static int
4262 i915_cache_sharing_set(void *data, u64 val)
4263 {
4264 	struct drm_i915_private *dev_priv = data;
4265 	u32 snpcr;
4266 
4267 	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4268 		return -ENODEV;
4269 
4270 	if (val > 3)
4271 		return -EINVAL;
4272 
4273 	intel_runtime_pm_get(dev_priv);
4274 	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
4275 
4276 	/* Update the cache sharing policy here as well */
4277 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4278 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
4279 	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
4280 	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4281 
4282 	intel_runtime_pm_put(dev_priv);
4283 	return 0;
4284 }
4285 
4286 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4287 			i915_cache_sharing_get, i915_cache_sharing_set,
4288 			"%llu\n");
4289 
4290 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
4291 					  struct sseu_dev_info *sseu)
4292 {
4293 #define SS_MAX 2
4294 	const int ss_max = SS_MAX;
4295 	u32 sig1[SS_MAX], sig2[SS_MAX];
4296 	int ss;
4297 
4298 	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
4299 	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
4300 	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
4301 	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
4302 
4303 	for (ss = 0; ss < ss_max; ss++) {
4304 		unsigned int eu_cnt;
4305 
4306 		if (sig1[ss] & CHV_SS_PG_ENABLE)
4307 			/* skip disabled subslice */
4308 			continue;
4309 
4310 		sseu->slice_mask = BIT(0);
4311 		sseu->subslice_mask[0] |= BIT(ss);
4312 		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
4313 			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
4314 			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
4315 			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
4316 		sseu->eu_total += eu_cnt;
4317 		sseu->eu_per_subslice = max_t(unsigned int,
4318 					      sseu->eu_per_subslice, eu_cnt);
4319 	}
4320 #undef SS_MAX
4321 }
4322 
4323 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
4324 				     struct sseu_dev_info *sseu)
4325 {
4326 #define SS_MAX 6
4327 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4328 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4329 	int s, ss;
4330 
4331 	for (s = 0; s < info->sseu.max_slices; s++) {
4332 		/*
4333 		 * FIXME: Valid SS Mask respects the spec and read
4334 		 * only valid bits for those registers, excluding reserverd
4335 		 * although this seems wrong because it would leave many
4336 		 * subslices without ACK.
4337 		 */
4338 		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
4339 			GEN10_PGCTL_VALID_SS_MASK(s);
4340 		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
4341 		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
4342 	}
4343 
4344 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4345 		     GEN9_PGCTL_SSA_EU19_ACK |
4346 		     GEN9_PGCTL_SSA_EU210_ACK |
4347 		     GEN9_PGCTL_SSA_EU311_ACK;
4348 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4349 		     GEN9_PGCTL_SSB_EU19_ACK |
4350 		     GEN9_PGCTL_SSB_EU210_ACK |
4351 		     GEN9_PGCTL_SSB_EU311_ACK;
4352 
4353 	for (s = 0; s < info->sseu.max_slices; s++) {
4354 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4355 			/* skip disabled slice */
4356 			continue;
4357 
4358 		sseu->slice_mask |= BIT(s);
4359 		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
4360 
4361 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4362 			unsigned int eu_cnt;
4363 
4364 			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4365 				/* skip disabled subslice */
4366 				continue;
4367 
4368 			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
4369 					       eu_mask[ss % 2]);
4370 			sseu->eu_total += eu_cnt;
4371 			sseu->eu_per_subslice = max_t(unsigned int,
4372 						      sseu->eu_per_subslice,
4373 						      eu_cnt);
4374 		}
4375 	}
4376 #undef SS_MAX
4377 }
4378 
4379 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
4380 				    struct sseu_dev_info *sseu)
4381 {
4382 #define SS_MAX 3
4383 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
4384 	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
4385 	int s, ss;
4386 
4387 	for (s = 0; s < info->sseu.max_slices; s++) {
4388 		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
4389 		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
4390 		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
4391 	}
4392 
4393 	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
4394 		     GEN9_PGCTL_SSA_EU19_ACK |
4395 		     GEN9_PGCTL_SSA_EU210_ACK |
4396 		     GEN9_PGCTL_SSA_EU311_ACK;
4397 	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
4398 		     GEN9_PGCTL_SSB_EU19_ACK |
4399 		     GEN9_PGCTL_SSB_EU210_ACK |
4400 		     GEN9_PGCTL_SSB_EU311_ACK;
4401 
4402 	for (s = 0; s < info->sseu.max_slices; s++) {
4403 		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
4404 			/* skip disabled slice */
4405 			continue;
4406 
4407 		sseu->slice_mask |= BIT(s);
4408 
4409 		if (IS_GEN9_BC(dev_priv))
4410 			sseu->subslice_mask[s] =
4411 				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4412 
4413 		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
4414 			unsigned int eu_cnt;
4415 
4416 			if (IS_GEN9_LP(dev_priv)) {
4417 				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
4418 					/* skip disabled subslice */
4419 					continue;
4420 
4421 				sseu->subslice_mask[s] |= BIT(ss);
4422 			}
4423 
4424 			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
4425 					       eu_mask[ss%2]);
4426 			sseu->eu_total += eu_cnt;
4427 			sseu->eu_per_subslice = max_t(unsigned int,
4428 						      sseu->eu_per_subslice,
4429 						      eu_cnt);
4430 		}
4431 	}
4432 #undef SS_MAX
4433 }
4434 
4435 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4436 					 struct sseu_dev_info *sseu)
4437 {
4438 	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4439 	int s;
4440 
4441 	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4442 
4443 	if (sseu->slice_mask) {
4444 		sseu->eu_per_subslice =
4445 				INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
4446 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4447 			sseu->subslice_mask[s] =
4448 				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4449 		}
4450 		sseu->eu_total = sseu->eu_per_subslice *
4451 				 sseu_subslice_total(sseu);
4452 
4453 		/* subtract fused off EU(s) from enabled slice(s) */
4454 		for (s = 0; s < fls(sseu->slice_mask); s++) {
4455 			u8 subslice_7eu =
4456 				INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
4457 
4458 			sseu->eu_total -= hweight8(subslice_7eu);
4459 		}
4460 	}
4461 }
4462 
4463 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4464 				 const struct sseu_dev_info *sseu)
4465 {
4466 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4467 	const char *type = is_available_info ? "Available" : "Enabled";
4468 	int s;
4469 
4470 	seq_printf(m, "  %s Slice Mask: %04x\n", type,
4471 		   sseu->slice_mask);
4472 	seq_printf(m, "  %s Slice Total: %u\n", type,
4473 		   hweight8(sseu->slice_mask));
4474 	seq_printf(m, "  %s Subslice Total: %u\n", type,
4475 		   sseu_subslice_total(sseu));
4476 	for (s = 0; s < fls(sseu->slice_mask); s++) {
4477 		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4478 			   s, hweight8(sseu->subslice_mask[s]));
4479 	}
4480 	seq_printf(m, "  %s EU Total: %u\n", type,
4481 		   sseu->eu_total);
4482 	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4483 		   sseu->eu_per_subslice);
4484 
4485 	if (!is_available_info)
4486 		return;
4487 
4488 	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4489 	if (HAS_POOLED_EU(dev_priv))
4490 		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4491 
4492 	seq_printf(m, "  Has Slice Power Gating: %s\n",
4493 		   yesno(sseu->has_slice_pg));
4494 	seq_printf(m, "  Has Subslice Power Gating: %s\n",
4495 		   yesno(sseu->has_subslice_pg));
4496 	seq_printf(m, "  Has EU Power Gating: %s\n",
4497 		   yesno(sseu->has_eu_pg));
4498 }
4499 
4500 static int i915_sseu_status(struct seq_file *m, void *unused)
4501 {
4502 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
4503 	struct sseu_dev_info sseu;
4504 
4505 	if (INTEL_GEN(dev_priv) < 8)
4506 		return -ENODEV;
4507 
4508 	seq_puts(m, "SSEU Device Info\n");
4509 	i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
4510 
4511 	seq_puts(m, "SSEU Device Status\n");
4512 	memset(&sseu, 0, sizeof(sseu));
4513 	sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4514 	sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4515 	sseu.max_eus_per_subslice =
4516 		INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
4517 
4518 	intel_runtime_pm_get(dev_priv);
4519 
4520 	if (IS_CHERRYVIEW(dev_priv)) {
4521 		cherryview_sseu_device_status(dev_priv, &sseu);
4522 	} else if (IS_BROADWELL(dev_priv)) {
4523 		broadwell_sseu_device_status(dev_priv, &sseu);
4524 	} else if (IS_GEN9(dev_priv)) {
4525 		gen9_sseu_device_status(dev_priv, &sseu);
4526 	} else if (INTEL_GEN(dev_priv) >= 10) {
4527 		gen10_sseu_device_status(dev_priv, &sseu);
4528 	}
4529 
4530 	intel_runtime_pm_put(dev_priv);
4531 
4532 	i915_print_sseu_info(m, false, &sseu);
4533 
4534 	return 0;
4535 }
4536 
4537 static int i915_forcewake_open(struct inode *inode, struct file *file)
4538 {
4539 	struct drm_i915_private *i915 = inode->i_private;
4540 
4541 	if (INTEL_GEN(i915) < 6)
4542 		return 0;
4543 
4544 	intel_runtime_pm_get(i915);
4545 	intel_uncore_forcewake_user_get(i915);
4546 
4547 	return 0;
4548 }
4549 
4550 static int i915_forcewake_release(struct inode *inode, struct file *file)
4551 {
4552 	struct drm_i915_private *i915 = inode->i_private;
4553 
4554 	if (INTEL_GEN(i915) < 6)
4555 		return 0;
4556 
4557 	intel_uncore_forcewake_user_put(i915);
4558 	intel_runtime_pm_put(i915);
4559 
4560 	return 0;
4561 }
4562 
4563 static const struct file_operations i915_forcewake_fops = {
4564 	.owner = THIS_MODULE,
4565 	.open = i915_forcewake_open,
4566 	.release = i915_forcewake_release,
4567 };
4568 
4569 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4570 {
4571 	struct drm_i915_private *dev_priv = m->private;
4572 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4573 
4574 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4575 	seq_printf(m, "Detected: %s\n",
4576 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
4577 
4578 	return 0;
4579 }
4580 
4581 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4582 					const char __user *ubuf, size_t len,
4583 					loff_t *offp)
4584 {
4585 	struct seq_file *m = file->private_data;
4586 	struct drm_i915_private *dev_priv = m->private;
4587 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
4588 	unsigned int new_threshold;
4589 	int i;
4590 	char *newline;
4591 	char tmp[16];
4592 
4593 	if (len >= sizeof(tmp))
4594 		return -EINVAL;
4595 
4596 	if (copy_from_user(tmp, ubuf, len))
4597 		return -EFAULT;
4598 
4599 	tmp[len] = '\0';
4600 
4601 	/* Strip newline, if any */
4602 	newline = strchr(tmp, '\n');
4603 	if (newline)
4604 		*newline = '\0';
4605 
4606 	if (strcmp(tmp, "reset") == 0)
4607 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4608 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4609 		return -EINVAL;
4610 
4611 	if (new_threshold > 0)
4612 		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4613 			      new_threshold);
4614 	else
4615 		DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4616 
4617 	spin_lock_irq(&dev_priv->irq_lock);
4618 	hotplug->hpd_storm_threshold = new_threshold;
4619 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
4620 	for_each_hpd_pin(i)
4621 		hotplug->stats[i].count = 0;
4622 	spin_unlock_irq(&dev_priv->irq_lock);
4623 
4624 	/* Re-enable hpd immediately if we were in an irq storm */
4625 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
4626 
4627 	return len;
4628 }
4629 
4630 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4631 {
4632 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
4633 }
4634 
4635 static const struct file_operations i915_hpd_storm_ctl_fops = {
4636 	.owner = THIS_MODULE,
4637 	.open = i915_hpd_storm_ctl_open,
4638 	.read = seq_read,
4639 	.llseek = seq_lseek,
4640 	.release = single_release,
4641 	.write = i915_hpd_storm_ctl_write
4642 };
4643 
4644 static int i915_drrs_ctl_set(void *data, u64 val)
4645 {
4646 	struct drm_i915_private *dev_priv = data;
4647 	struct drm_device *dev = &dev_priv->drm;
4648 	struct intel_crtc *intel_crtc;
4649 	struct intel_encoder *encoder;
4650 	struct intel_dp *intel_dp;
4651 
4652 	if (INTEL_GEN(dev_priv) < 7)
4653 		return -ENODEV;
4654 
4655 	drm_modeset_lock_all(dev);
4656 	for_each_intel_crtc(dev, intel_crtc) {
4657 		if (!intel_crtc->base.state->active ||
4658 					!intel_crtc->config->has_drrs)
4659 			continue;
4660 
4661 		for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
4662 			if (encoder->type != INTEL_OUTPUT_EDP)
4663 				continue;
4664 
4665 			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4666 						val ? "en" : "dis", val);
4667 
4668 			intel_dp = enc_to_intel_dp(&encoder->base);
4669 			if (val)
4670 				intel_edp_drrs_enable(intel_dp,
4671 							intel_crtc->config);
4672 			else
4673 				intel_edp_drrs_disable(intel_dp,
4674 							intel_crtc->config);
4675 		}
4676 	}
4677 	drm_modeset_unlock_all(dev);
4678 
4679 	return 0;
4680 }
4681 
4682 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4683 
4684 static ssize_t
4685 i915_fifo_underrun_reset_write(struct file *filp,
4686 			       const char __user *ubuf,
4687 			       size_t cnt, loff_t *ppos)
4688 {
4689 	struct drm_i915_private *dev_priv = filp->private_data;
4690 	struct intel_crtc *intel_crtc;
4691 	struct drm_device *dev = &dev_priv->drm;
4692 	int ret;
4693 	bool reset;
4694 
4695 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
4696 	if (ret)
4697 		return ret;
4698 
4699 	if (!reset)
4700 		return cnt;
4701 
4702 	for_each_intel_crtc(dev, intel_crtc) {
4703 		struct drm_crtc_commit *commit;
4704 		struct intel_crtc_state *crtc_state;
4705 
4706 		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4707 		if (ret)
4708 			return ret;
4709 
4710 		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4711 		commit = crtc_state->base.commit;
4712 		if (commit) {
4713 			ret = wait_for_completion_interruptible(&commit->hw_done);
4714 			if (!ret)
4715 				ret = wait_for_completion_interruptible(&commit->flip_done);
4716 		}
4717 
4718 		if (!ret && crtc_state->base.active) {
4719 			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4720 				      pipe_name(intel_crtc->pipe));
4721 
4722 			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4723 		}
4724 
4725 		drm_modeset_unlock(&intel_crtc->base.mutex);
4726 
4727 		if (ret)
4728 			return ret;
4729 	}
4730 
4731 	ret = intel_fbc_reset_underrun(dev_priv);
4732 	if (ret)
4733 		return ret;
4734 
4735 	return cnt;
4736 }
4737 
4738 static const struct file_operations i915_fifo_underrun_reset_ops = {
4739 	.owner = THIS_MODULE,
4740 	.open = simple_open,
4741 	.write = i915_fifo_underrun_reset_write,
4742 	.llseek = default_llseek,
4743 };
4744 
4745 static const struct drm_info_list i915_debugfs_list[] = {
4746 	{"i915_capabilities", i915_capabilities, 0},
4747 	{"i915_gem_objects", i915_gem_object_info, 0},
4748 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
4749 	{"i915_gem_stolen", i915_gem_stolen_list_info },
4750 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4751 	{"i915_gem_interrupt", i915_interrupt_info, 0},
4752 	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4753 	{"i915_guc_info", i915_guc_info, 0},
4754 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
4755 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
4756 	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4757 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4758 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
4759 	{"i915_frequency_info", i915_frequency_info, 0},
4760 	{"i915_hangcheck_info", i915_hangcheck_info, 0},
4761 	{"i915_reset_info", i915_reset_info, 0},
4762 	{"i915_drpc_info", i915_drpc_info, 0},
4763 	{"i915_emon_status", i915_emon_status, 0},
4764 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
4765 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4766 	{"i915_fbc_status", i915_fbc_status, 0},
4767 	{"i915_ips_status", i915_ips_status, 0},
4768 	{"i915_sr_status", i915_sr_status, 0},
4769 	{"i915_opregion", i915_opregion, 0},
4770 	{"i915_vbt", i915_vbt, 0},
4771 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4772 	{"i915_context_status", i915_context_status, 0},
4773 	{"i915_forcewake_domains", i915_forcewake_domains, 0},
4774 	{"i915_swizzle_info", i915_swizzle_info, 0},
4775 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
4776 	{"i915_llc", i915_llc, 0},
4777 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
4778 	{"i915_energy_uJ", i915_energy_uJ, 0},
4779 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4780 	{"i915_power_domain_info", i915_power_domain_info, 0},
4781 	{"i915_dmc_info", i915_dmc_info, 0},
4782 	{"i915_display_info", i915_display_info, 0},
4783 	{"i915_engine_info", i915_engine_info, 0},
4784 	{"i915_rcs_topology", i915_rcs_topology, 0},
4785 	{"i915_shrinker_info", i915_shrinker_info, 0},
4786 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4787 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
4788 	{"i915_wa_registers", i915_wa_registers, 0},
4789 	{"i915_ddb_info", i915_ddb_info, 0},
4790 	{"i915_sseu_status", i915_sseu_status, 0},
4791 	{"i915_drrs_status", i915_drrs_status, 0},
4792 	{"i915_rps_boost_info", i915_rps_boost_info, 0},
4793 };
4794 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4795 
4796 static const struct i915_debugfs_files {
4797 	const char *name;
4798 	const struct file_operations *fops;
4799 } i915_debugfs_files[] = {
4800 	{"i915_wedged", &i915_wedged_fops},
4801 	{"i915_cache_sharing", &i915_cache_sharing_fops},
4802 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
4803 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
4804 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
4805 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4806 	{"i915_error_state", &i915_error_state_fops},
4807 	{"i915_gpu_info", &i915_gpu_info_fops},
4808 #endif
4809 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4810 	{"i915_next_seqno", &i915_next_seqno_fops},
4811 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4812 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4813 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4814 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
4815 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
4816 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
4817 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
4818 	{"i915_guc_log_level", &i915_guc_log_level_fops},
4819 	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
4820 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4821 	{"i915_ipc_status", &i915_ipc_status_fops},
4822 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
4823 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4824 };
4825 
4826 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4827 {
4828 	struct drm_minor *minor = dev_priv->drm.primary;
4829 	struct dentry *ent;
4830 	int i;
4831 
4832 	ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4833 				  minor->debugfs_root, to_i915(minor->dev),
4834 				  &i915_forcewake_fops);
4835 	if (!ent)
4836 		return -ENOMEM;
4837 
4838 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4839 		ent = debugfs_create_file(i915_debugfs_files[i].name,
4840 					  S_IRUGO | S_IWUSR,
4841 					  minor->debugfs_root,
4842 					  to_i915(minor->dev),
4843 					  i915_debugfs_files[i].fops);
4844 		if (!ent)
4845 			return -ENOMEM;
4846 	}
4847 
4848 	return drm_debugfs_create_files(i915_debugfs_list,
4849 					I915_DEBUGFS_ENTRIES,
4850 					minor->debugfs_root, minor);
4851 }
4852 
4853 struct dpcd_block {
4854 	/* DPCD dump start address. */
4855 	unsigned int offset;
4856 	/* DPCD dump end address, inclusive. If unset, .size will be used. */
4857 	unsigned int end;
4858 	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
4859 	size_t size;
4860 	/* Only valid for eDP. */
4861 	bool edp;
4862 };
4863 
4864 static const struct dpcd_block i915_dpcd_debug[] = {
4865 	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
4866 	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
4867 	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
4868 	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
4869 	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
4870 	{ .offset = DP_SET_POWER },
4871 	{ .offset = DP_EDP_DPCD_REV },
4872 	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
4873 	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
4874 	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
4875 };
4876 
4877 static int i915_dpcd_show(struct seq_file *m, void *data)
4878 {
4879 	struct drm_connector *connector = m->private;
4880 	struct intel_dp *intel_dp =
4881 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4882 	uint8_t buf[16];
4883 	ssize_t err;
4884 	int i;
4885 
4886 	if (connector->status != connector_status_connected)
4887 		return -ENODEV;
4888 
4889 	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4890 		const struct dpcd_block *b = &i915_dpcd_debug[i];
4891 		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4892 
4893 		if (b->edp &&
4894 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4895 			continue;
4896 
4897 		/* low tech for now */
4898 		if (WARN_ON(size > sizeof(buf)))
4899 			continue;
4900 
4901 		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4902 		if (err <= 0) {
4903 			DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
4904 				  size, b->offset, err);
4905 			continue;
4906 		}
4907 
4908 		seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
4909 	}
4910 
4911 	return 0;
4912 }
4913 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4914 
4915 static int i915_panel_show(struct seq_file *m, void *data)
4916 {
4917 	struct drm_connector *connector = m->private;
4918 	struct intel_dp *intel_dp =
4919 		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4920 
4921 	if (connector->status != connector_status_connected)
4922 		return -ENODEV;
4923 
4924 	seq_printf(m, "Panel power up delay: %d\n",
4925 		   intel_dp->panel_power_up_delay);
4926 	seq_printf(m, "Panel power down delay: %d\n",
4927 		   intel_dp->panel_power_down_delay);
4928 	seq_printf(m, "Backlight on delay: %d\n",
4929 		   intel_dp->backlight_on_delay);
4930 	seq_printf(m, "Backlight off delay: %d\n",
4931 		   intel_dp->backlight_off_delay);
4932 
4933 	return 0;
4934 }
4935 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4936 
4937 /**
4938  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4939  * @connector: pointer to a registered drm_connector
4940  *
4941  * Cleanup will be done by drm_connector_unregister() through a call to
4942  * drm_debugfs_connector_remove().
4943  *
4944  * Returns 0 on success, negative error codes on error.
4945  */
4946 int i915_debugfs_connector_add(struct drm_connector *connector)
4947 {
4948 	struct dentry *root = connector->debugfs_entry;
4949 
4950 	/* The connector must have been registered beforehands. */
4951 	if (!root)
4952 		return -ENODEV;
4953 
4954 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4955 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4956 		debugfs_create_file("i915_dpcd", S_IRUGO, root,
4957 				    connector, &i915_dpcd_fops);
4958 
4959 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4960 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4961 				    connector, &i915_panel_fops);
4962 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4963 				    connector, &i915_psr_sink_status_fops);
4964 	}
4965 
4966 	return 0;
4967 }
4968