/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
#include "gt/uc/intel_guc_submission.h"

#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"

static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	intel_driver_caps_print(&i915->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

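/*
 * Decode a mask of GTT page sizes into a human readable string; single
 * sizes map to static strings, combinations are written into the
 * caller-provided buffer.
 */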
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += scnprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += scnprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += scnprintf(buf + x, len - x, "4K, ");
		/* Strip the trailing ", " separator, if anything was printed */
		if (x >= 2)
			buf[x - 2] = '\0';

		return buf;
	}
}

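/*
 * Print a one-line summary of a GEM object: flags, size, domains and
 * every bound VMA. obj->vma.lock is dropped around the seq_file output
 * for each VMA.
 */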
void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, ", fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_puts(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}

struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total, unbound;
	u64 active, inactive;
	u64 closed;
};

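/* idr_for_each() callback: accumulate the memory used by a single object. */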
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	if (!kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;
	if (!atomic_read(&obj->bind_count))
		stats->unbound += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		struct rb_node *p = obj->vma.tree.rb_node;

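		/* Find the VMA bound into the target vm, if any. */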
		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	i915_gem_object_put(obj);
	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.unbound, \
			   stats.closed); \
} while (0)

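/*
 * Walk the context list, taking a reference on each context so that
 * gem.contexts.lock can be dropped while the engines are inspected.
 */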
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				rcu_read_lock();
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);
				rcu_read_unlock();
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = {
				.vm = rcu_access_pointer(ctx->vm),
			};
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			rcu_read_lock();
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			rcu_read_unlock();

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	print_file_stats(m, "[k]contexts", kstats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
			   mr->name, &mr->total, &mr->avail);
	seq_putc(m, '\n');

	print_context_stats(m, i915);

	return 0;
}

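/*
 * Dump the gen8+ display interrupt registers; pipes without power are
 * noted and skipped.
 */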
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}

static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	unsigned int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	rcu_read_lock();
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = reg->vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, atomic_read(&reg->pin_count));
		if (!vma)
			seq_puts(m, "unused");
		else
			i915_debugfs_describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_coredump *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

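		/* The position of the request field in RPNSWREQ varies by gen. */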
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_rps_read_actual_frequency(rps);

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return ret;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(rps,
					  (gpu_freq *
					   (IS_GEN9_BC(dev_priv) ||
					    INTEL_GEN(dev_priv) >= 10 ?
					    GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}

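/* List every GEM context and the HW state of each pinned engine within it. */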
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		seq_puts(m, "HW context ");
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				seq_printf(m, "%s: ", ce->engine->name);
				if (ce->state)
					i915_debugfs_describe_obj(m, ce->state->obj);
				describe_ctx_ring(m, ce->ring);
				seq_putc(m, '\n');
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	return 0;
}

static const char *swizzle_string(unsigned int swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DCC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DCC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}

static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}

static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   dev_priv->edram_size_mb);

	return 0;
}

static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}

static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}

static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_ISR_LOG_BUFFER:
		return "ISR";
	case GUC_DPC_LOG_BUFFER:
		return "DPC";
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return "CRASH";
	default:
		MISSING_CASE(type);
	}

	return "";
}

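/* Summarise GuC log relay usage: flush and overflow counts per buffer type. */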
static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
{
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_created(log)) {
		seq_puts(m, "GuC log relay not created\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}

static int i915_guc_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc *uc = &dev_priv->gt.uc;

	if (!intel_uc_uses_guc(uc))
		return -ENODEV;

	i915_guc_log_info(m, &uc->guc.log);

	/* Add more as required ... */

	return 0;
}

static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uc *uc = &dev_priv->gt.uc;
	struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
	int index;

	if (!intel_uc_uses_guc_submission(uc))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		for_each_uabi_engine(engine, dev_priv) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}

static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GT_UC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->gt.uc.load_err_log;
	else if (dev_priv->gt.uc.guc.log.vma)
		obj = dev_priv->gt.uc.guc.log.vma->obj;

	if (!obj)
		return 0;

	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data inaccessible)\n");
		return PTR_ERR(log);
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}

static int i915_guc_log_level_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_uc *uc = &dev_priv->gt.uc;

	if (!intel_uc_uses_guc(uc))
		return -ENODEV;

	*val = intel_guc_log_get_level(&uc->guc.log);

	return 0;
}

static int i915_guc_log_level_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_uc *uc = &dev_priv->gt.uc;

	if (!intel_uc_uses_guc(uc))
		return -ENODEV;

	return intel_guc_log_set_level(&uc->guc.log, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");

static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_guc *guc = &i915->gt.uc.guc;
	struct intel_guc_log *log = &guc->log;

	if (!intel_guc_is_ready(guc))
		return -ENODEV;

	file->private_data = log;

	return intel_guc_log_relay_open(log);
}

static ssize_t
i915_guc_log_relay_write(struct file *filp,
			 const char __user *ubuf,
			 size_t cnt,
			 loff_t *ppos)
{
	struct intel_guc_log *log = filp->private_data;
	int val;
	int ret;

	ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
	if (ret < 0)
		return ret;

	/*
	 * Enable and start the GuC log relay on a value of 1;
	 * flush the log relay for any other value.
	 */
	if (val == 1)
		ret = intel_guc_log_relay_start(log);
	else
		intel_guc_log_relay_flush(log);

	return ret ?: cnt;
}

static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_guc *guc = &i915->gt.uc.guc;

	intel_guc_log_relay_close(&guc->log);
	return 0;
}

static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};

static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (!HAS_RUNTIME_PM(dev_priv))
		seq_puts(m, "Runtime power management not supported\n");

	seq_printf(m, "Runtime power status: %s\n",
		   enableddisabled(!dev_priv->power_domains.wakeref));

	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
	seq_printf(m, "IRQs disabled: %s\n",
		   yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
	seq_printf(m, "Usage count: %d\n",
		   atomic_read(&dev_priv->drm.dev->power.usage_count));
#else
	seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
#endif
	seq_printf(m, "PCI device power state: %s [%d]\n",
		   pci_power_name(pdev->current_state),
		   pdev->current_state);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
		struct drm_printer p = drm_seq_file_printer(m);

		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
	}

	return 0;
}

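/* Dump the full state of every user-visible engine while holding a wakeref. */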
static int i915_engine_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	struct drm_printer p;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "GT awake? %s [%d]\n",
		   yesno(dev_priv->gt.awake),
		   atomic_read(&dev_priv->gt.wakeref.count));
	seq_printf(m, "CS timestamp frequency: %u kHz\n",
		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);

	p = drm_seq_file_printer(m);
	for_each_uabi_engine(engine, dev_priv)
		intel_engine_dump(engine, &p, "%s\n", engine->name);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_rcs_topology(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);

	return 0;
}

static int i915_shrinker_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);

	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

	return 0;
}

static int i915_wa_registers(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		const struct i915_wa_list *wal = &engine->ctx_wa_list;
		const struct i915_wa *wa;
		unsigned int count;

		count = wal->count;
		if (!count)
			continue;

		seq_printf(m, "%s: Workarounds applied: %u\n",
			   engine->name, count);

		for (wa = wal->list; count--; wa++)
			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
				   i915_mmio_reg_offset(wa->reg),
				   wa->set, wa->clr);

		seq_putc(m, '\n');
	}

	return 0;
}

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;
	int ret = intel_gt_terminally_wedged(&i915->gt);

	switch (ret) {
	case -EIO:
		*val = 1;
		return 0;
	case 0:
		*val = 0;
		return 0;
	default:
		return ret;
	}
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");

static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * differences on the CS with only 32 bits.
	 */
	if (val > mul_u32_u32(U32_MAX, clk))
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}

static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");

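/* Bits accepted by the i915_drop_caches debugfs control. */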
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU	BIT(9)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret;

	drm_dbg(&i915->drm, "Dropping caches: 0x%08llx [0x%08llx]\n",
		val, val & DROP_ALL);

	ret = gt_drop_caches(&i915->gt, val);
	if (ret)
		return ret;

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

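/*
 * i915_cache_sharing: LLC snoop/cache-sharing policy on gen6/gen7. The
 * value (0-3) selects the GEN6_MBC_SNPCR_* level in the SNPCR register,
 * 0 being the maximum amount of cache shared with the GPU and 3 the
 * minimum.
 */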
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	drm_dbg(&dev_priv->drm,
		"Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");

/* Copy one slice's worth (ss_stride bytes) of the subslice mask. */
static void
intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
			  u8 *to_mask)
{
	int offset = slice * sseu->ss_stride;

	memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
}

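/*
 * The *_sseu_device_status() helpers below report the *current* SSEU
 * power-gating state by sampling the hardware's power-gate ACK registers,
 * as opposed to the static fuse-derived topology kept in the runtime
 * info. Each fills in @sseu with the slices/subslices that are awake and
 * totals up their enabled EUs.
 */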
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < SS_MAX; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}

static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: The valid SS mask follows the spec and reads only
		 * the valid (non-reserved) bits of these registers. That
		 * seems wrong, though, as it would leave many subslices
		 * without an ACK bit.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (info->sseu.has_subslice_pg &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;
			u8 ss_idx = s * info->sseu.ss_stride +
				    ss / BITS_PER_BYTE;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[ss_idx] |=
					BIT(ss % BITS_PER_BYTE);
			}

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
				   struct sseu_dev_info *sseu)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++)
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu = info->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}

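/*
 * Print one block of SSEU numbers: "Available" for the fused device
 * topology, "Enabled" for the live power-gating status sampled above.
 */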
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &info->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	intel_sseu_set_info(&sseu, info->sseu.max_slices,
			    info->sseu.max_subslices,
			    info->sseu.max_eus_per_subslice);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			bdw_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}

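/*
 * i915_forcewake_user: holding this file open keeps the GT awake and, on
 * gen6+, holds forcewake so the hardware stays in a register-readable
 * state for external tools.
 *
 * Usage sketch (the path is illustrative):
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user  # take wakeref
 *   ...inspect registers...
 *   exec 3<&-                                            # drop it again
 */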
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_put(gt->uncore);
	intel_gt_pm_put(gt);
	atomic_dec(&gt->user_wakeref);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
};

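/*
 * Register everything above with debugfs: the tunable driver parameters,
 * the forcewake handle, the writable control files and the read-only
 * info nodes from i915_debugfs_list[].
 */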
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	i915_debugfs_params(dev_priv);

	debugfs_create_file("i915_forcewake_user", 0400, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);
	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    0644,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}