1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/sched/mm.h> 30 #include <linux/sort.h> 31 32 #include <drm/drm_debugfs.h> 33 34 #include "gem/i915_gem_context.h" 35 #include "gt/intel_gt_buffer_pool.h" 36 #include "gt/intel_gt_clock_utils.h" 37 #include "gt/intel_gt.h" 38 #include "gt/intel_gt_pm.h" 39 #include "gt/intel_gt_requests.h" 40 #include "gt/intel_reset.h" 41 #include "gt/intel_rc6.h" 42 #include "gt/intel_rps.h" 43 #include "gt/intel_sseu_debugfs.h" 44 45 #include "i915_debugfs.h" 46 #include "i915_debugfs_params.h" 47 #include "i915_irq.h" 48 #include "i915_trace.h" 49 #include "intel_pm.h" 50 #include "intel_sideband.h" 51 52 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) 53 { 54 return to_i915(node->minor->dev); 55 } 56 57 static int i915_capabilities(struct seq_file *m, void *data) 58 { 59 struct drm_i915_private *i915 = node_to_i915(m->private); 60 struct drm_printer p = drm_seq_file_printer(m); 61 62 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915)); 63 64 intel_device_info_print_static(INTEL_INFO(i915), &p); 65 intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); 66 intel_gt_info_print(&i915->gt.info, &p); 67 intel_driver_caps_print(&i915->caps, &p); 68 69 kernel_param_lock(THIS_MODULE); 70 i915_params_dump(&i915->params, &p); 71 kernel_param_unlock(THIS_MODULE); 72 73 return 0; 74 } 75 76 static char get_tiling_flag(struct drm_i915_gem_object *obj) 77 { 78 switch (i915_gem_object_get_tiling(obj)) { 79 default: 80 case I915_TILING_NONE: return ' '; 81 case I915_TILING_X: return 'X'; 82 case I915_TILING_Y: return 'Y'; 83 } 84 } 85 86 static char get_global_flag(struct drm_i915_gem_object *obj) 87 { 88 return READ_ONCE(obj->userfault_count) ? 'g' : ' '; 89 } 90 91 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) 92 { 93 return obj->mm.mapping ? 'M' : ' '; 94 } 95 96 static const char * 97 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len) 98 { 99 size_t x = 0; 100 101 switch (page_sizes) { 102 case 0: 103 return ""; 104 case I915_GTT_PAGE_SIZE_4K: 105 return "4K"; 106 case I915_GTT_PAGE_SIZE_64K: 107 return "64K"; 108 case I915_GTT_PAGE_SIZE_2M: 109 return "2M"; 110 default: 111 if (!buf) 112 return "M"; 113 114 if (page_sizes & I915_GTT_PAGE_SIZE_2M) 115 x += snprintf(buf + x, len - x, "2M, "); 116 if (page_sizes & I915_GTT_PAGE_SIZE_64K) 117 x += snprintf(buf + x, len - x, "64K, "); 118 if (page_sizes & I915_GTT_PAGE_SIZE_4K) 119 x += snprintf(buf + x, len - x, "4K, "); 120 buf[x-2] = '\0'; 121 122 return buf; 123 } 124 } 125 126 void 127 i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 128 { 129 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 130 struct intel_engine_cs *engine; 131 struct i915_vma *vma; 132 int pin_count = 0; 133 134 seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s", 135 &obj->base, 136 get_tiling_flag(obj), 137 get_global_flag(obj), 138 get_pin_mapped_flag(obj), 139 obj->base.size / 1024, 140 obj->read_domains, 141 obj->write_domain, 142 i915_cache_level_str(dev_priv, obj->cache_level), 143 obj->mm.dirty ? " dirty" : "", 144 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : ""); 145 if (obj->base.name) 146 seq_printf(m, " (name: %d)", obj->base.name); 147 148 spin_lock(&obj->vma.lock); 149 list_for_each_entry(vma, &obj->vma.list, obj_link) { 150 if (!drm_mm_node_allocated(&vma->node)) 151 continue; 152 153 spin_unlock(&obj->vma.lock); 154 155 if (i915_vma_is_pinned(vma)) 156 pin_count++; 157 158 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s", 159 i915_vma_is_ggtt(vma) ? "g" : "pp", 160 vma->node.start, vma->node.size, 161 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0)); 162 if (i915_vma_is_ggtt(vma)) { 163 switch (vma->ggtt_view.type) { 164 case I915_GGTT_VIEW_NORMAL: 165 seq_puts(m, ", normal"); 166 break; 167 168 case I915_GGTT_VIEW_PARTIAL: 169 seq_printf(m, ", partial [%08llx+%x]", 170 vma->ggtt_view.partial.offset << PAGE_SHIFT, 171 vma->ggtt_view.partial.size << PAGE_SHIFT); 172 break; 173 174 case I915_GGTT_VIEW_ROTATED: 175 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", 176 vma->ggtt_view.rotated.plane[0].width, 177 vma->ggtt_view.rotated.plane[0].height, 178 vma->ggtt_view.rotated.plane[0].stride, 179 vma->ggtt_view.rotated.plane[0].offset, 180 vma->ggtt_view.rotated.plane[1].width, 181 vma->ggtt_view.rotated.plane[1].height, 182 vma->ggtt_view.rotated.plane[1].stride, 183 vma->ggtt_view.rotated.plane[1].offset); 184 break; 185 186 case I915_GGTT_VIEW_REMAPPED: 187 seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", 188 vma->ggtt_view.remapped.plane[0].width, 189 vma->ggtt_view.remapped.plane[0].height, 190 vma->ggtt_view.remapped.plane[0].stride, 191 vma->ggtt_view.remapped.plane[0].offset, 192 vma->ggtt_view.remapped.plane[1].width, 193 vma->ggtt_view.remapped.plane[1].height, 194 vma->ggtt_view.remapped.plane[1].stride, 195 vma->ggtt_view.remapped.plane[1].offset); 196 break; 197 198 default: 199 MISSING_CASE(vma->ggtt_view.type); 200 break; 201 } 202 } 203 if (vma->fence) 204 seq_printf(m, " , fence: %d", vma->fence->id); 205 seq_puts(m, ")"); 206 207 spin_lock(&obj->vma.lock); 208 } 209 spin_unlock(&obj->vma.lock); 210 211 seq_printf(m, " (pinned x %d)", pin_count); 212 if (obj->stolen) 213 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 214 if (i915_gem_object_is_framebuffer(obj)) 215 seq_printf(m, " (fb)"); 216 217 engine = i915_gem_object_last_write_engine(obj); 218 if (engine) 219 seq_printf(m, " (%s)", engine->name); 220 } 221 222 struct file_stats { 223 struct i915_address_space *vm; 224 unsigned long count; 225 u64 total; 226 u64 active, inactive; 227 u64 closed; 228 }; 229 230 static int per_file_stats(int id, void *ptr, void *data) 231 { 232 struct drm_i915_gem_object *obj = ptr; 233 struct file_stats *stats = data; 234 struct i915_vma *vma; 235 236 if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount)) 237 return 0; 238 239 stats->count++; 240 stats->total += obj->base.size; 241 242 spin_lock(&obj->vma.lock); 243 if (!stats->vm) { 244 for_each_ggtt_vma(vma, obj) { 245 if (!drm_mm_node_allocated(&vma->node)) 246 continue; 247 248 if (i915_vma_is_active(vma)) 249 stats->active += vma->node.size; 250 else 251 stats->inactive += vma->node.size; 252 253 if (i915_vma_is_closed(vma)) 254 stats->closed += vma->node.size; 255 } 256 } else { 257 struct rb_node *p = obj->vma.tree.rb_node; 258 259 while (p) { 260 long cmp; 261 262 vma = rb_entry(p, typeof(*vma), obj_node); 263 cmp = i915_vma_compare(vma, stats->vm, NULL); 264 if (cmp == 0) { 265 if (drm_mm_node_allocated(&vma->node)) { 266 if (i915_vma_is_active(vma)) 267 stats->active += vma->node.size; 268 else 269 stats->inactive += vma->node.size; 270 271 if (i915_vma_is_closed(vma)) 272 stats->closed += vma->node.size; 273 } 274 break; 275 } 276 if (cmp < 0) 277 p = p->rb_right; 278 else 279 p = p->rb_left; 280 } 281 } 282 spin_unlock(&obj->vma.lock); 283 284 i915_gem_object_put(obj); 285 return 0; 286 } 287 288 #define print_file_stats(m, name, stats) do { \ 289 if (stats.count) \ 290 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu closed)\n", \ 291 name, \ 292 stats.count, \ 293 stats.total, \ 294 stats.active, \ 295 stats.inactive, \ 296 stats.closed); \ 297 } while (0) 298 299 static void print_context_stats(struct seq_file *m, 300 struct drm_i915_private *i915) 301 { 302 struct file_stats kstats = {}; 303 struct i915_gem_context *ctx, *cn; 304 305 spin_lock(&i915->gem.contexts.lock); 306 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { 307 struct i915_gem_engines_iter it; 308 struct intel_context *ce; 309 310 if (!kref_get_unless_zero(&ctx->ref)) 311 continue; 312 313 spin_unlock(&i915->gem.contexts.lock); 314 315 for_each_gem_engine(ce, 316 i915_gem_context_lock_engines(ctx), it) { 317 if (intel_context_pin_if_active(ce)) { 318 rcu_read_lock(); 319 if (ce->state) 320 per_file_stats(0, 321 ce->state->obj, &kstats); 322 per_file_stats(0, ce->ring->vma->obj, &kstats); 323 rcu_read_unlock(); 324 intel_context_unpin(ce); 325 } 326 } 327 i915_gem_context_unlock_engines(ctx); 328 329 if (!IS_ERR_OR_NULL(ctx->file_priv)) { 330 struct file_stats stats = { 331 .vm = rcu_access_pointer(ctx->vm), 332 }; 333 struct drm_file *file = ctx->file_priv->file; 334 struct task_struct *task; 335 char name[80]; 336 337 rcu_read_lock(); 338 idr_for_each(&file->object_idr, per_file_stats, &stats); 339 rcu_read_unlock(); 340 341 rcu_read_lock(); 342 task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID); 343 snprintf(name, sizeof(name), "%s", 344 task ? task->comm : "<unknown>"); 345 rcu_read_unlock(); 346 347 print_file_stats(m, name, stats); 348 } 349 350 spin_lock(&i915->gem.contexts.lock); 351 list_safe_reset_next(ctx, cn, link); 352 i915_gem_context_put(ctx); 353 } 354 spin_unlock(&i915->gem.contexts.lock); 355 356 print_file_stats(m, "[k]contexts", kstats); 357 } 358 359 static int i915_gem_object_info(struct seq_file *m, void *data) 360 { 361 struct drm_i915_private *i915 = node_to_i915(m->private); 362 struct intel_memory_region *mr; 363 enum intel_region_id id; 364 365 seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n", 366 i915->mm.shrink_count, 367 atomic_read(&i915->mm.free_count), 368 i915->mm.shrink_memory); 369 for_each_memory_region(mr, i915, id) 370 seq_printf(m, "%s: total:%pa, available:%pa bytes\n", 371 mr->name, &mr->total, &mr->avail); 372 seq_putc(m, '\n'); 373 374 print_context_stats(m, i915); 375 376 return 0; 377 } 378 379 static void gen8_display_interrupt_info(struct seq_file *m) 380 { 381 struct drm_i915_private *dev_priv = node_to_i915(m->private); 382 enum pipe pipe; 383 384 for_each_pipe(dev_priv, pipe) { 385 enum intel_display_power_domain power_domain; 386 intel_wakeref_t wakeref; 387 388 power_domain = POWER_DOMAIN_PIPE(pipe); 389 wakeref = intel_display_power_get_if_enabled(dev_priv, 390 power_domain); 391 if (!wakeref) { 392 seq_printf(m, "Pipe %c power disabled\n", 393 pipe_name(pipe)); 394 continue; 395 } 396 seq_printf(m, "Pipe %c IMR:\t%08x\n", 397 pipe_name(pipe), 398 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 399 seq_printf(m, "Pipe %c IIR:\t%08x\n", 400 pipe_name(pipe), 401 I915_READ(GEN8_DE_PIPE_IIR(pipe))); 402 seq_printf(m, "Pipe %c IER:\t%08x\n", 403 pipe_name(pipe), 404 I915_READ(GEN8_DE_PIPE_IER(pipe))); 405 406 intel_display_power_put(dev_priv, power_domain, wakeref); 407 } 408 409 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 410 I915_READ(GEN8_DE_PORT_IMR)); 411 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 412 I915_READ(GEN8_DE_PORT_IIR)); 413 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 414 I915_READ(GEN8_DE_PORT_IER)); 415 416 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 417 I915_READ(GEN8_DE_MISC_IMR)); 418 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 419 I915_READ(GEN8_DE_MISC_IIR)); 420 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 421 I915_READ(GEN8_DE_MISC_IER)); 422 423 seq_printf(m, "PCU interrupt mask:\t%08x\n", 424 I915_READ(GEN8_PCU_IMR)); 425 seq_printf(m, "PCU interrupt identity:\t%08x\n", 426 I915_READ(GEN8_PCU_IIR)); 427 seq_printf(m, "PCU interrupt enable:\t%08x\n", 428 I915_READ(GEN8_PCU_IER)); 429 } 430 431 static int i915_interrupt_info(struct seq_file *m, void *data) 432 { 433 struct drm_i915_private *dev_priv = node_to_i915(m->private); 434 struct intel_engine_cs *engine; 435 intel_wakeref_t wakeref; 436 int i, pipe; 437 438 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 439 440 if (IS_CHERRYVIEW(dev_priv)) { 441 intel_wakeref_t pref; 442 443 seq_printf(m, "Master Interrupt Control:\t%08x\n", 444 I915_READ(GEN8_MASTER_IRQ)); 445 446 seq_printf(m, "Display IER:\t%08x\n", 447 I915_READ(VLV_IER)); 448 seq_printf(m, "Display IIR:\t%08x\n", 449 I915_READ(VLV_IIR)); 450 seq_printf(m, "Display IIR_RW:\t%08x\n", 451 I915_READ(VLV_IIR_RW)); 452 seq_printf(m, "Display IMR:\t%08x\n", 453 I915_READ(VLV_IMR)); 454 for_each_pipe(dev_priv, pipe) { 455 enum intel_display_power_domain power_domain; 456 457 power_domain = POWER_DOMAIN_PIPE(pipe); 458 pref = intel_display_power_get_if_enabled(dev_priv, 459 power_domain); 460 if (!pref) { 461 seq_printf(m, "Pipe %c power disabled\n", 462 pipe_name(pipe)); 463 continue; 464 } 465 466 seq_printf(m, "Pipe %c stat:\t%08x\n", 467 pipe_name(pipe), 468 I915_READ(PIPESTAT(pipe))); 469 470 intel_display_power_put(dev_priv, power_domain, pref); 471 } 472 473 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 474 seq_printf(m, "Port hotplug:\t%08x\n", 475 I915_READ(PORT_HOTPLUG_EN)); 476 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 477 I915_READ(VLV_DPFLIPSTAT)); 478 seq_printf(m, "DPINVGTT:\t%08x\n", 479 I915_READ(DPINVGTT)); 480 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref); 481 482 for (i = 0; i < 4; i++) { 483 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 484 i, I915_READ(GEN8_GT_IMR(i))); 485 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 486 i, I915_READ(GEN8_GT_IIR(i))); 487 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 488 i, I915_READ(GEN8_GT_IER(i))); 489 } 490 491 seq_printf(m, "PCU interrupt mask:\t%08x\n", 492 I915_READ(GEN8_PCU_IMR)); 493 seq_printf(m, "PCU interrupt identity:\t%08x\n", 494 I915_READ(GEN8_PCU_IIR)); 495 seq_printf(m, "PCU interrupt enable:\t%08x\n", 496 I915_READ(GEN8_PCU_IER)); 497 } else if (INTEL_GEN(dev_priv) >= 11) { 498 if (HAS_MASTER_UNIT_IRQ(dev_priv)) 499 seq_printf(m, "Master Unit Interrupt Control: %08x\n", 500 I915_READ(DG1_MSTR_UNIT_INTR)); 501 502 seq_printf(m, "Master Interrupt Control: %08x\n", 503 I915_READ(GEN11_GFX_MSTR_IRQ)); 504 505 seq_printf(m, "Render/Copy Intr Enable: %08x\n", 506 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE)); 507 seq_printf(m, "VCS/VECS Intr Enable: %08x\n", 508 I915_READ(GEN11_VCS_VECS_INTR_ENABLE)); 509 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n", 510 I915_READ(GEN11_GUC_SG_INTR_ENABLE)); 511 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n", 512 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE)); 513 seq_printf(m, "Crypto Intr Enable:\t %08x\n", 514 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE)); 515 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n", 516 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE)); 517 518 seq_printf(m, "Display Interrupt Control:\t%08x\n", 519 I915_READ(GEN11_DISPLAY_INT_CTL)); 520 521 gen8_display_interrupt_info(m); 522 } else if (INTEL_GEN(dev_priv) >= 8) { 523 seq_printf(m, "Master Interrupt Control:\t%08x\n", 524 I915_READ(GEN8_MASTER_IRQ)); 525 526 for (i = 0; i < 4; i++) { 527 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 528 i, I915_READ(GEN8_GT_IMR(i))); 529 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 530 i, I915_READ(GEN8_GT_IIR(i))); 531 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 532 i, I915_READ(GEN8_GT_IER(i))); 533 } 534 535 gen8_display_interrupt_info(m); 536 } else if (IS_VALLEYVIEW(dev_priv)) { 537 intel_wakeref_t pref; 538 539 seq_printf(m, "Display IER:\t%08x\n", 540 I915_READ(VLV_IER)); 541 seq_printf(m, "Display IIR:\t%08x\n", 542 I915_READ(VLV_IIR)); 543 seq_printf(m, "Display IIR_RW:\t%08x\n", 544 I915_READ(VLV_IIR_RW)); 545 seq_printf(m, "Display IMR:\t%08x\n", 546 I915_READ(VLV_IMR)); 547 for_each_pipe(dev_priv, pipe) { 548 enum intel_display_power_domain power_domain; 549 550 power_domain = POWER_DOMAIN_PIPE(pipe); 551 pref = intel_display_power_get_if_enabled(dev_priv, 552 power_domain); 553 if (!pref) { 554 seq_printf(m, "Pipe %c power disabled\n", 555 pipe_name(pipe)); 556 continue; 557 } 558 559 seq_printf(m, "Pipe %c stat:\t%08x\n", 560 pipe_name(pipe), 561 I915_READ(PIPESTAT(pipe))); 562 intel_display_power_put(dev_priv, power_domain, pref); 563 } 564 565 seq_printf(m, "Master IER:\t%08x\n", 566 I915_READ(VLV_MASTER_IER)); 567 568 seq_printf(m, "Render IER:\t%08x\n", 569 I915_READ(GTIER)); 570 seq_printf(m, "Render IIR:\t%08x\n", 571 I915_READ(GTIIR)); 572 seq_printf(m, "Render IMR:\t%08x\n", 573 I915_READ(GTIMR)); 574 575 seq_printf(m, "PM IER:\t\t%08x\n", 576 I915_READ(GEN6_PMIER)); 577 seq_printf(m, "PM IIR:\t\t%08x\n", 578 I915_READ(GEN6_PMIIR)); 579 seq_printf(m, "PM IMR:\t\t%08x\n", 580 I915_READ(GEN6_PMIMR)); 581 582 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 583 seq_printf(m, "Port hotplug:\t%08x\n", 584 I915_READ(PORT_HOTPLUG_EN)); 585 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 586 I915_READ(VLV_DPFLIPSTAT)); 587 seq_printf(m, "DPINVGTT:\t%08x\n", 588 I915_READ(DPINVGTT)); 589 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref); 590 591 } else if (!HAS_PCH_SPLIT(dev_priv)) { 592 seq_printf(m, "Interrupt enable: %08x\n", 593 I915_READ(GEN2_IER)); 594 seq_printf(m, "Interrupt identity: %08x\n", 595 I915_READ(GEN2_IIR)); 596 seq_printf(m, "Interrupt mask: %08x\n", 597 I915_READ(GEN2_IMR)); 598 for_each_pipe(dev_priv, pipe) 599 seq_printf(m, "Pipe %c stat: %08x\n", 600 pipe_name(pipe), 601 I915_READ(PIPESTAT(pipe))); 602 } else { 603 seq_printf(m, "North Display Interrupt enable: %08x\n", 604 I915_READ(DEIER)); 605 seq_printf(m, "North Display Interrupt identity: %08x\n", 606 I915_READ(DEIIR)); 607 seq_printf(m, "North Display Interrupt mask: %08x\n", 608 I915_READ(DEIMR)); 609 seq_printf(m, "South Display Interrupt enable: %08x\n", 610 I915_READ(SDEIER)); 611 seq_printf(m, "South Display Interrupt identity: %08x\n", 612 I915_READ(SDEIIR)); 613 seq_printf(m, "South Display Interrupt mask: %08x\n", 614 I915_READ(SDEIMR)); 615 seq_printf(m, "Graphics Interrupt enable: %08x\n", 616 I915_READ(GTIER)); 617 seq_printf(m, "Graphics Interrupt identity: %08x\n", 618 I915_READ(GTIIR)); 619 seq_printf(m, "Graphics Interrupt mask: %08x\n", 620 I915_READ(GTIMR)); 621 } 622 623 if (INTEL_GEN(dev_priv) >= 11) { 624 seq_printf(m, "RCS Intr Mask:\t %08x\n", 625 I915_READ(GEN11_RCS0_RSVD_INTR_MASK)); 626 seq_printf(m, "BCS Intr Mask:\t %08x\n", 627 I915_READ(GEN11_BCS_RSVD_INTR_MASK)); 628 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n", 629 I915_READ(GEN11_VCS0_VCS1_INTR_MASK)); 630 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n", 631 I915_READ(GEN11_VCS2_VCS3_INTR_MASK)); 632 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n", 633 I915_READ(GEN11_VECS0_VECS1_INTR_MASK)); 634 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n", 635 I915_READ(GEN11_GUC_SG_INTR_MASK)); 636 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n", 637 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK)); 638 seq_printf(m, "Crypto Intr Mask:\t %08x\n", 639 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK)); 640 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n", 641 I915_READ(GEN11_GUNIT_CSME_INTR_MASK)); 642 643 } else if (INTEL_GEN(dev_priv) >= 6) { 644 for_each_uabi_engine(engine, dev_priv) { 645 seq_printf(m, 646 "Graphics Interrupt mask (%s): %08x\n", 647 engine->name, ENGINE_READ(engine, RING_IMR)); 648 } 649 } 650 651 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 652 653 return 0; 654 } 655 656 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 657 { 658 struct drm_i915_private *i915 = node_to_i915(m->private); 659 unsigned int i; 660 661 seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences); 662 663 rcu_read_lock(); 664 for (i = 0; i < i915->ggtt.num_fences; i++) { 665 struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i]; 666 struct i915_vma *vma = reg->vma; 667 668 seq_printf(m, "Fence %d, pin count = %d, object = ", 669 i, atomic_read(®->pin_count)); 670 if (!vma) 671 seq_puts(m, "unused"); 672 else 673 i915_debugfs_describe_obj(m, vma->obj); 674 seq_putc(m, '\n'); 675 } 676 rcu_read_unlock(); 677 678 return 0; 679 } 680 681 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 682 static ssize_t gpu_state_read(struct file *file, char __user *ubuf, 683 size_t count, loff_t *pos) 684 { 685 struct i915_gpu_coredump *error; 686 ssize_t ret; 687 void *buf; 688 689 error = file->private_data; 690 if (!error) 691 return 0; 692 693 /* Bounce buffer required because of kernfs __user API convenience. */ 694 buf = kmalloc(count, GFP_KERNEL); 695 if (!buf) 696 return -ENOMEM; 697 698 ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count); 699 if (ret <= 0) 700 goto out; 701 702 if (!copy_to_user(ubuf, buf, ret)) 703 *pos += ret; 704 else 705 ret = -EFAULT; 706 707 out: 708 kfree(buf); 709 return ret; 710 } 711 712 static int gpu_state_release(struct inode *inode, struct file *file) 713 { 714 i915_gpu_coredump_put(file->private_data); 715 return 0; 716 } 717 718 static int i915_gpu_info_open(struct inode *inode, struct file *file) 719 { 720 struct drm_i915_private *i915 = inode->i_private; 721 struct i915_gpu_coredump *gpu; 722 intel_wakeref_t wakeref; 723 724 gpu = NULL; 725 with_intel_runtime_pm(&i915->runtime_pm, wakeref) 726 gpu = i915_gpu_coredump(i915); 727 if (IS_ERR(gpu)) 728 return PTR_ERR(gpu); 729 730 file->private_data = gpu; 731 return 0; 732 } 733 734 static const struct file_operations i915_gpu_info_fops = { 735 .owner = THIS_MODULE, 736 .open = i915_gpu_info_open, 737 .read = gpu_state_read, 738 .llseek = default_llseek, 739 .release = gpu_state_release, 740 }; 741 742 static ssize_t 743 i915_error_state_write(struct file *filp, 744 const char __user *ubuf, 745 size_t cnt, 746 loff_t *ppos) 747 { 748 struct i915_gpu_coredump *error = filp->private_data; 749 750 if (!error) 751 return 0; 752 753 drm_dbg(&error->i915->drm, "Resetting error state\n"); 754 i915_reset_error_state(error->i915); 755 756 return cnt; 757 } 758 759 static int i915_error_state_open(struct inode *inode, struct file *file) 760 { 761 struct i915_gpu_coredump *error; 762 763 error = i915_first_error_state(inode->i_private); 764 if (IS_ERR(error)) 765 return PTR_ERR(error); 766 767 file->private_data = error; 768 return 0; 769 } 770 771 static const struct file_operations i915_error_state_fops = { 772 .owner = THIS_MODULE, 773 .open = i915_error_state_open, 774 .read = gpu_state_read, 775 .write = i915_error_state_write, 776 .llseek = default_llseek, 777 .release = gpu_state_release, 778 }; 779 #endif 780 781 static int i915_frequency_info(struct seq_file *m, void *unused) 782 { 783 struct drm_i915_private *dev_priv = node_to_i915(m->private); 784 struct intel_uncore *uncore = &dev_priv->uncore; 785 struct intel_rps *rps = &dev_priv->gt.rps; 786 intel_wakeref_t wakeref; 787 int ret = 0; 788 789 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 790 791 if (IS_GEN(dev_priv, 5)) { 792 u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); 793 u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK); 794 795 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 796 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 797 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 798 MEMSTAT_VID_SHIFT); 799 seq_printf(m, "Current P-state: %d\n", 800 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 801 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 802 u32 rpmodectl, freq_sts; 803 804 rpmodectl = I915_READ(GEN6_RP_CONTROL); 805 seq_printf(m, "Video Turbo Mode: %s\n", 806 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); 807 seq_printf(m, "HW control enabled: %s\n", 808 yesno(rpmodectl & GEN6_RP_ENABLE)); 809 seq_printf(m, "SW control enabled: %s\n", 810 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == 811 GEN6_RP_MEDIA_SW_MODE)); 812 813 vlv_punit_get(dev_priv); 814 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 815 vlv_punit_put(dev_priv); 816 817 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 818 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 819 820 seq_printf(m, "actual GPU freq: %d MHz\n", 821 intel_gpu_freq(rps, (freq_sts >> 8) & 0xff)); 822 823 seq_printf(m, "current GPU freq: %d MHz\n", 824 intel_gpu_freq(rps, rps->cur_freq)); 825 826 seq_printf(m, "max GPU freq: %d MHz\n", 827 intel_gpu_freq(rps, rps->max_freq)); 828 829 seq_printf(m, "min GPU freq: %d MHz\n", 830 intel_gpu_freq(rps, rps->min_freq)); 831 832 seq_printf(m, "idle GPU freq: %d MHz\n", 833 intel_gpu_freq(rps, rps->idle_freq)); 834 835 seq_printf(m, 836 "efficient (RPe) frequency: %d MHz\n", 837 intel_gpu_freq(rps, rps->efficient_freq)); 838 } else if (INTEL_GEN(dev_priv) >= 6) { 839 u32 rp_state_limits; 840 u32 gt_perf_status; 841 u32 rp_state_cap; 842 u32 rpmodectl, rpinclimit, rpdeclimit; 843 u32 rpstat, cagf, reqf; 844 u32 rpupei, rpcurup, rpprevup; 845 u32 rpdownei, rpcurdown, rpprevdown; 846 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 847 int max_freq; 848 849 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 850 if (IS_GEN9_LP(dev_priv)) { 851 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 852 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS); 853 } else { 854 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 855 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 856 } 857 858 /* RPSTAT1 is in the GT power well */ 859 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 860 861 reqf = I915_READ(GEN6_RPNSWREQ); 862 if (INTEL_GEN(dev_priv) >= 9) 863 reqf >>= 23; 864 else { 865 reqf &= ~GEN6_TURBO_DISABLE; 866 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 867 reqf >>= 24; 868 else 869 reqf >>= 25; 870 } 871 reqf = intel_gpu_freq(rps, reqf); 872 873 rpmodectl = I915_READ(GEN6_RP_CONTROL); 874 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); 875 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); 876 877 rpstat = I915_READ(GEN6_RPSTAT1); 878 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; 879 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; 880 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; 881 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; 882 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; 883 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; 884 cagf = intel_rps_read_actual_frequency(rps); 885 886 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 887 888 if (INTEL_GEN(dev_priv) >= 11) { 889 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE); 890 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK); 891 /* 892 * The equivalent to the PM ISR & IIR cannot be read 893 * without affecting the current state of the system 894 */ 895 pm_isr = 0; 896 pm_iir = 0; 897 } else if (INTEL_GEN(dev_priv) >= 8) { 898 pm_ier = I915_READ(GEN8_GT_IER(2)); 899 pm_imr = I915_READ(GEN8_GT_IMR(2)); 900 pm_isr = I915_READ(GEN8_GT_ISR(2)); 901 pm_iir = I915_READ(GEN8_GT_IIR(2)); 902 } else { 903 pm_ier = I915_READ(GEN6_PMIER); 904 pm_imr = I915_READ(GEN6_PMIMR); 905 pm_isr = I915_READ(GEN6_PMISR); 906 pm_iir = I915_READ(GEN6_PMIIR); 907 } 908 pm_mask = I915_READ(GEN6_PMINTRMSK); 909 910 seq_printf(m, "Video Turbo Mode: %s\n", 911 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); 912 seq_printf(m, "HW control enabled: %s\n", 913 yesno(rpmodectl & GEN6_RP_ENABLE)); 914 seq_printf(m, "SW control enabled: %s\n", 915 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == 916 GEN6_RP_MEDIA_SW_MODE)); 917 918 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", 919 pm_ier, pm_imr, pm_mask); 920 if (INTEL_GEN(dev_priv) <= 10) 921 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n", 922 pm_isr, pm_iir); 923 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n", 924 rps->pm_intrmsk_mbz); 925 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 926 seq_printf(m, "Render p-state ratio: %d\n", 927 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8); 928 seq_printf(m, "Render p-state VID: %d\n", 929 gt_perf_status & 0xff); 930 seq_printf(m, "Render p-state limit: %d\n", 931 rp_state_limits & 0xff); 932 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 933 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); 934 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); 935 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 936 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 937 seq_printf(m, "CAGF: %dMHz\n", cagf); 938 seq_printf(m, "RP CUR UP EI: %d (%dns)\n", 939 rpupei, 940 intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei)); 941 seq_printf(m, "RP CUR UP: %d (%dun)\n", 942 rpcurup, 943 intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup)); 944 seq_printf(m, "RP PREV UP: %d (%dns)\n", 945 rpprevup, 946 intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup)); 947 seq_printf(m, "Up threshold: %d%%\n", 948 rps->power.up_threshold); 949 950 seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n", 951 rpdownei, 952 intel_gt_pm_interval_to_ns(&dev_priv->gt, 953 rpdownei)); 954 seq_printf(m, "RP CUR DOWN: %d (%dns)\n", 955 rpcurdown, 956 intel_gt_pm_interval_to_ns(&dev_priv->gt, 957 rpcurdown)); 958 seq_printf(m, "RP PREV DOWN: %d (%dns)\n", 959 rpprevdown, 960 intel_gt_pm_interval_to_ns(&dev_priv->gt, 961 rpprevdown)); 962 seq_printf(m, "Down threshold: %d%%\n", 963 rps->power.down_threshold); 964 965 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 : 966 rp_state_cap >> 16) & 0xff; 967 max_freq *= (IS_GEN9_BC(dev_priv) || 968 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); 969 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 970 intel_gpu_freq(rps, max_freq)); 971 972 max_freq = (rp_state_cap & 0xff00) >> 8; 973 max_freq *= (IS_GEN9_BC(dev_priv) || 974 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); 975 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 976 intel_gpu_freq(rps, max_freq)); 977 978 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 : 979 rp_state_cap >> 0) & 0xff; 980 max_freq *= (IS_GEN9_BC(dev_priv) || 981 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); 982 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 983 intel_gpu_freq(rps, max_freq)); 984 seq_printf(m, "Max overclocked frequency: %dMHz\n", 985 intel_gpu_freq(rps, rps->max_freq)); 986 987 seq_printf(m, "Current freq: %d MHz\n", 988 intel_gpu_freq(rps, rps->cur_freq)); 989 seq_printf(m, "Actual freq: %d MHz\n", cagf); 990 seq_printf(m, "Idle freq: %d MHz\n", 991 intel_gpu_freq(rps, rps->idle_freq)); 992 seq_printf(m, "Min freq: %d MHz\n", 993 intel_gpu_freq(rps, rps->min_freq)); 994 seq_printf(m, "Boost freq: %d MHz\n", 995 intel_gpu_freq(rps, rps->boost_freq)); 996 seq_printf(m, "Max freq: %d MHz\n", 997 intel_gpu_freq(rps, rps->max_freq)); 998 seq_printf(m, 999 "efficient (RPe) frequency: %d MHz\n", 1000 intel_gpu_freq(rps, rps->efficient_freq)); 1001 } else { 1002 seq_puts(m, "no P-state info available\n"); 1003 } 1004 1005 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk); 1006 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); 1007 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); 1008 1009 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 1010 return ret; 1011 } 1012 1013 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1014 { 1015 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1016 struct intel_rps *rps = &dev_priv->gt.rps; 1017 unsigned int max_gpu_freq, min_gpu_freq; 1018 intel_wakeref_t wakeref; 1019 int gpu_freq, ia_freq; 1020 1021 if (!HAS_LLC(dev_priv)) 1022 return -ENODEV; 1023 1024 min_gpu_freq = rps->min_freq; 1025 max_gpu_freq = rps->max_freq; 1026 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) { 1027 /* Convert GT frequency to 50 HZ units */ 1028 min_gpu_freq /= GEN9_FREQ_SCALER; 1029 max_gpu_freq /= GEN9_FREQ_SCALER; 1030 } 1031 1032 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1033 1034 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 1035 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 1036 ia_freq = gpu_freq; 1037 sandybridge_pcode_read(dev_priv, 1038 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1039 &ia_freq, NULL); 1040 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1041 intel_gpu_freq(rps, 1042 (gpu_freq * 1043 (IS_GEN9_BC(dev_priv) || 1044 INTEL_GEN(dev_priv) >= 10 ? 1045 GEN9_FREQ_SCALER : 1))), 1046 ((ia_freq >> 0) & 0xff) * 100, 1047 ((ia_freq >> 8) & 0xff) * 100); 1048 } 1049 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 1050 1051 return 0; 1052 } 1053 1054 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) 1055 { 1056 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)", 1057 ring->space, ring->head, ring->tail, ring->emit); 1058 } 1059 1060 static int i915_context_status(struct seq_file *m, void *unused) 1061 { 1062 struct drm_i915_private *i915 = node_to_i915(m->private); 1063 struct i915_gem_context *ctx, *cn; 1064 1065 spin_lock(&i915->gem.contexts.lock); 1066 list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { 1067 struct i915_gem_engines_iter it; 1068 struct intel_context *ce; 1069 1070 if (!kref_get_unless_zero(&ctx->ref)) 1071 continue; 1072 1073 spin_unlock(&i915->gem.contexts.lock); 1074 1075 seq_puts(m, "HW context "); 1076 if (ctx->pid) { 1077 struct task_struct *task; 1078 1079 task = get_pid_task(ctx->pid, PIDTYPE_PID); 1080 if (task) { 1081 seq_printf(m, "(%s [%d]) ", 1082 task->comm, task->pid); 1083 put_task_struct(task); 1084 } 1085 } else if (IS_ERR(ctx->file_priv)) { 1086 seq_puts(m, "(deleted) "); 1087 } else { 1088 seq_puts(m, "(kernel) "); 1089 } 1090 1091 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 1092 seq_putc(m, '\n'); 1093 1094 for_each_gem_engine(ce, 1095 i915_gem_context_lock_engines(ctx), it) { 1096 if (intel_context_pin_if_active(ce)) { 1097 seq_printf(m, "%s: ", ce->engine->name); 1098 if (ce->state) 1099 i915_debugfs_describe_obj(m, ce->state->obj); 1100 describe_ctx_ring(m, ce->ring); 1101 seq_putc(m, '\n'); 1102 intel_context_unpin(ce); 1103 } 1104 } 1105 i915_gem_context_unlock_engines(ctx); 1106 1107 seq_putc(m, '\n'); 1108 1109 spin_lock(&i915->gem.contexts.lock); 1110 list_safe_reset_next(ctx, cn, link); 1111 i915_gem_context_put(ctx); 1112 } 1113 spin_unlock(&i915->gem.contexts.lock); 1114 1115 return 0; 1116 } 1117 1118 static const char *swizzle_string(unsigned swizzle) 1119 { 1120 switch (swizzle) { 1121 case I915_BIT_6_SWIZZLE_NONE: 1122 return "none"; 1123 case I915_BIT_6_SWIZZLE_9: 1124 return "bit9"; 1125 case I915_BIT_6_SWIZZLE_9_10: 1126 return "bit9/bit10"; 1127 case I915_BIT_6_SWIZZLE_9_11: 1128 return "bit9/bit11"; 1129 case I915_BIT_6_SWIZZLE_9_10_11: 1130 return "bit9/bit10/bit11"; 1131 case I915_BIT_6_SWIZZLE_9_17: 1132 return "bit9/bit17"; 1133 case I915_BIT_6_SWIZZLE_9_10_17: 1134 return "bit9/bit10/bit17"; 1135 case I915_BIT_6_SWIZZLE_UNKNOWN: 1136 return "unknown"; 1137 } 1138 1139 return "bug"; 1140 } 1141 1142 static int i915_swizzle_info(struct seq_file *m, void *data) 1143 { 1144 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1145 struct intel_uncore *uncore = &dev_priv->uncore; 1146 intel_wakeref_t wakeref; 1147 1148 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1149 swizzle_string(dev_priv->ggtt.bit_6_swizzle_x)); 1150 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1151 swizzle_string(dev_priv->ggtt.bit_6_swizzle_y)); 1152 1153 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 1154 seq_puts(m, "L-shaped memory detected\n"); 1155 1156 /* On BDW+, swizzling is not used. See detect_bit_6_swizzle() */ 1157 if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) 1158 return 0; 1159 1160 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 1161 1162 if (IS_GEN_RANGE(dev_priv, 3, 4)) { 1163 seq_printf(m, "DDC = 0x%08x\n", 1164 intel_uncore_read(uncore, DCC)); 1165 seq_printf(m, "DDC2 = 0x%08x\n", 1166 intel_uncore_read(uncore, DCC2)); 1167 seq_printf(m, "C0DRB3 = 0x%04x\n", 1168 intel_uncore_read16(uncore, C0DRB3)); 1169 seq_printf(m, "C1DRB3 = 0x%04x\n", 1170 intel_uncore_read16(uncore, C1DRB3)); 1171 } else if (INTEL_GEN(dev_priv) >= 6) { 1172 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1173 intel_uncore_read(uncore, MAD_DIMM_C0)); 1174 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1175 intel_uncore_read(uncore, MAD_DIMM_C1)); 1176 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1177 intel_uncore_read(uncore, MAD_DIMM_C2)); 1178 seq_printf(m, "TILECTL = 0x%08x\n", 1179 intel_uncore_read(uncore, TILECTL)); 1180 if (INTEL_GEN(dev_priv) >= 8) 1181 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 1182 intel_uncore_read(uncore, GAMTARBMODE)); 1183 else 1184 seq_printf(m, "ARB_MODE = 0x%08x\n", 1185 intel_uncore_read(uncore, ARB_MODE)); 1186 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1187 intel_uncore_read(uncore, DISP_ARB_CTL)); 1188 } 1189 1190 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 1191 1192 return 0; 1193 } 1194 1195 static const char *rps_power_to_str(unsigned int power) 1196 { 1197 static const char * const strings[] = { 1198 [LOW_POWER] = "low power", 1199 [BETWEEN] = "mixed", 1200 [HIGH_POWER] = "high power", 1201 }; 1202 1203 if (power >= ARRAY_SIZE(strings) || !strings[power]) 1204 return "unknown"; 1205 1206 return strings[power]; 1207 } 1208 1209 static int i915_rps_boost_info(struct seq_file *m, void *data) 1210 { 1211 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1212 struct intel_rps *rps = &dev_priv->gt.rps; 1213 1214 seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps))); 1215 seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps))); 1216 seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake)); 1217 seq_printf(m, "Boosts outstanding? %d\n", 1218 atomic_read(&rps->num_waiters)); 1219 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); 1220 seq_printf(m, "Frequency requested %d, actual %d\n", 1221 intel_gpu_freq(rps, rps->cur_freq), 1222 intel_rps_read_actual_frequency(rps)); 1223 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", 1224 intel_gpu_freq(rps, rps->min_freq), 1225 intel_gpu_freq(rps, rps->min_freq_softlimit), 1226 intel_gpu_freq(rps, rps->max_freq_softlimit), 1227 intel_gpu_freq(rps, rps->max_freq)); 1228 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", 1229 intel_gpu_freq(rps, rps->idle_freq), 1230 intel_gpu_freq(rps, rps->efficient_freq), 1231 intel_gpu_freq(rps, rps->boost_freq)); 1232 1233 seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); 1234 1235 if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) { 1236 u32 rpup, rpupei; 1237 u32 rpdown, rpdownei; 1238 1239 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 1240 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; 1241 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; 1242 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; 1243 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; 1244 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 1245 1246 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", 1247 rps_power_to_str(rps->power.mode)); 1248 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", 1249 rpup && rpupei ? 100 * rpup / rpupei : 0, 1250 rps->power.up_threshold); 1251 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", 1252 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0, 1253 rps->power.down_threshold); 1254 } else { 1255 seq_puts(m, "\nRPS Autotuning inactive\n"); 1256 } 1257 1258 return 0; 1259 } 1260 1261 static int i915_llc(struct seq_file *m, void *data) 1262 { 1263 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1264 const bool edram = INTEL_GEN(dev_priv) > 8; 1265 1266 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv))); 1267 seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC", 1268 dev_priv->edram_size_mb); 1269 1270 return 0; 1271 } 1272 1273 static int i915_runtime_pm_status(struct seq_file *m, void *unused) 1274 { 1275 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1276 struct pci_dev *pdev = dev_priv->drm.pdev; 1277 1278 if (!HAS_RUNTIME_PM(dev_priv)) 1279 seq_puts(m, "Runtime power management not supported\n"); 1280 1281 seq_printf(m, "Runtime power status: %s\n", 1282 enableddisabled(!dev_priv->power_domains.wakeref)); 1283 1284 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); 1285 seq_printf(m, "IRQs disabled: %s\n", 1286 yesno(!intel_irqs_enabled(dev_priv))); 1287 #ifdef CONFIG_PM 1288 seq_printf(m, "Usage count: %d\n", 1289 atomic_read(&dev_priv->drm.dev->power.usage_count)); 1290 #else 1291 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 1292 #endif 1293 seq_printf(m, "PCI device power state: %s [%d]\n", 1294 pci_power_name(pdev->current_state), 1295 pdev->current_state); 1296 1297 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) { 1298 struct drm_printer p = drm_seq_file_printer(m); 1299 1300 print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p); 1301 } 1302 1303 return 0; 1304 } 1305 1306 static int i915_engine_info(struct seq_file *m, void *unused) 1307 { 1308 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1309 struct intel_engine_cs *engine; 1310 intel_wakeref_t wakeref; 1311 struct drm_printer p; 1312 1313 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 1314 1315 seq_printf(m, "GT awake? %s [%d]\n", 1316 yesno(dev_priv->gt.awake), 1317 atomic_read(&dev_priv->gt.wakeref.count)); 1318 seq_printf(m, "CS timestamp frequency: %u Hz\n", 1319 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz); 1320 1321 p = drm_seq_file_printer(m); 1322 for_each_uabi_engine(engine, dev_priv) 1323 intel_engine_dump(engine, &p, "%s\n", engine->name); 1324 1325 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 1326 1327 return 0; 1328 } 1329 1330 static int i915_shrinker_info(struct seq_file *m, void *unused) 1331 { 1332 struct drm_i915_private *i915 = node_to_i915(m->private); 1333 1334 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks); 1335 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch); 1336 1337 return 0; 1338 } 1339 1340 static int i915_wa_registers(struct seq_file *m, void *unused) 1341 { 1342 struct drm_i915_private *i915 = node_to_i915(m->private); 1343 struct intel_engine_cs *engine; 1344 1345 for_each_uabi_engine(engine, i915) { 1346 const struct i915_wa_list *wal = &engine->ctx_wa_list; 1347 const struct i915_wa *wa; 1348 unsigned int count; 1349 1350 count = wal->count; 1351 if (!count) 1352 continue; 1353 1354 seq_printf(m, "%s: Workarounds applied: %u\n", 1355 engine->name, count); 1356 1357 for (wa = wal->list; count--; wa++) 1358 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", 1359 i915_mmio_reg_offset(wa->reg), 1360 wa->set, wa->clr); 1361 1362 seq_printf(m, "\n"); 1363 } 1364 1365 return 0; 1366 } 1367 1368 static int 1369 i915_wedged_get(void *data, u64 *val) 1370 { 1371 struct drm_i915_private *i915 = data; 1372 int ret = intel_gt_terminally_wedged(&i915->gt); 1373 1374 switch (ret) { 1375 case -EIO: 1376 *val = 1; 1377 return 0; 1378 case 0: 1379 *val = 0; 1380 return 0; 1381 default: 1382 return ret; 1383 } 1384 } 1385 1386 static int 1387 i915_wedged_set(void *data, u64 val) 1388 { 1389 struct drm_i915_private *i915 = data; 1390 1391 /* Flush any previous reset before applying for a new one */ 1392 wait_event(i915->gt.reset.queue, 1393 !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags)); 1394 1395 intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE, 1396 "Manually set wedged engine mask = %llx", val); 1397 return 0; 1398 } 1399 1400 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 1401 i915_wedged_get, i915_wedged_set, 1402 "%llu\n"); 1403 1404 static int 1405 i915_perf_noa_delay_set(void *data, u64 val) 1406 { 1407 struct drm_i915_private *i915 = data; 1408 1409 /* 1410 * This would lead to infinite waits as we're doing timestamp 1411 * difference on the CS with only 32bits. 1412 */ 1413 if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX) 1414 return -EINVAL; 1415 1416 atomic64_set(&i915->perf.noa_programming_delay, val); 1417 return 0; 1418 } 1419 1420 static int 1421 i915_perf_noa_delay_get(void *data, u64 *val) 1422 { 1423 struct drm_i915_private *i915 = data; 1424 1425 *val = atomic64_read(&i915->perf.noa_programming_delay); 1426 return 0; 1427 } 1428 1429 DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops, 1430 i915_perf_noa_delay_get, 1431 i915_perf_noa_delay_set, 1432 "%llu\n"); 1433 1434 #define DROP_UNBOUND BIT(0) 1435 #define DROP_BOUND BIT(1) 1436 #define DROP_RETIRE BIT(2) 1437 #define DROP_ACTIVE BIT(3) 1438 #define DROP_FREED BIT(4) 1439 #define DROP_SHRINK_ALL BIT(5) 1440 #define DROP_IDLE BIT(6) 1441 #define DROP_RESET_ACTIVE BIT(7) 1442 #define DROP_RESET_SEQNO BIT(8) 1443 #define DROP_RCU BIT(9) 1444 #define DROP_ALL (DROP_UNBOUND | \ 1445 DROP_BOUND | \ 1446 DROP_RETIRE | \ 1447 DROP_ACTIVE | \ 1448 DROP_FREED | \ 1449 DROP_SHRINK_ALL |\ 1450 DROP_IDLE | \ 1451 DROP_RESET_ACTIVE | \ 1452 DROP_RESET_SEQNO | \ 1453 DROP_RCU) 1454 static int 1455 i915_drop_caches_get(void *data, u64 *val) 1456 { 1457 *val = DROP_ALL; 1458 1459 return 0; 1460 } 1461 static int 1462 gt_drop_caches(struct intel_gt *gt, u64 val) 1463 { 1464 int ret; 1465 1466 if (val & DROP_RESET_ACTIVE && 1467 wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT)) 1468 intel_gt_set_wedged(gt); 1469 1470 if (val & DROP_RETIRE) 1471 intel_gt_retire_requests(gt); 1472 1473 if (val & (DROP_IDLE | DROP_ACTIVE)) { 1474 ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); 1475 if (ret) 1476 return ret; 1477 } 1478 1479 if (val & DROP_IDLE) { 1480 ret = intel_gt_pm_wait_for_idle(gt); 1481 if (ret) 1482 return ret; 1483 } 1484 1485 if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt)) 1486 intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL); 1487 1488 if (val & DROP_FREED) 1489 intel_gt_flush_buffer_pool(gt); 1490 1491 return 0; 1492 } 1493 1494 static int 1495 i915_drop_caches_set(void *data, u64 val) 1496 { 1497 struct drm_i915_private *i915 = data; 1498 int ret; 1499 1500 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", 1501 val, val & DROP_ALL); 1502 1503 ret = gt_drop_caches(&i915->gt, val); 1504 if (ret) 1505 return ret; 1506 1507 fs_reclaim_acquire(GFP_KERNEL); 1508 if (val & DROP_BOUND) 1509 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND); 1510 1511 if (val & DROP_UNBOUND) 1512 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND); 1513 1514 if (val & DROP_SHRINK_ALL) 1515 i915_gem_shrink_all(i915); 1516 fs_reclaim_release(GFP_KERNEL); 1517 1518 if (val & DROP_RCU) 1519 rcu_barrier(); 1520 1521 if (val & DROP_FREED) 1522 i915_gem_drain_freed_objects(i915); 1523 1524 return 0; 1525 } 1526 1527 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 1528 i915_drop_caches_get, i915_drop_caches_set, 1529 "0x%08llx\n"); 1530 1531 static int 1532 i915_cache_sharing_get(void *data, u64 *val) 1533 { 1534 struct drm_i915_private *dev_priv = data; 1535 intel_wakeref_t wakeref; 1536 u32 snpcr = 0; 1537 1538 if (!(IS_GEN_RANGE(dev_priv, 6, 7))) 1539 return -ENODEV; 1540 1541 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) 1542 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1543 1544 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 1545 1546 return 0; 1547 } 1548 1549 static int 1550 i915_cache_sharing_set(void *data, u64 val) 1551 { 1552 struct drm_i915_private *dev_priv = data; 1553 intel_wakeref_t wakeref; 1554 1555 if (!(IS_GEN_RANGE(dev_priv, 6, 7))) 1556 return -ENODEV; 1557 1558 if (val > 3) 1559 return -EINVAL; 1560 1561 drm_dbg(&dev_priv->drm, 1562 "Manually setting uncore sharing to %llu\n", val); 1563 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { 1564 u32 snpcr; 1565 1566 /* Update the cache sharing policy here as well */ 1567 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 1568 snpcr &= ~GEN6_MBC_SNPCR_MASK; 1569 snpcr |= val << GEN6_MBC_SNPCR_SHIFT; 1570 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 1571 } 1572 1573 return 0; 1574 } 1575 1576 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 1577 i915_cache_sharing_get, i915_cache_sharing_set, 1578 "%llu\n"); 1579 1580 static int i915_sseu_status(struct seq_file *m, void *unused) 1581 { 1582 struct drm_i915_private *i915 = node_to_i915(m->private); 1583 struct intel_gt *gt = &i915->gt; 1584 1585 return intel_sseu_status(m, gt); 1586 } 1587 1588 static int i915_forcewake_open(struct inode *inode, struct file *file) 1589 { 1590 struct drm_i915_private *i915 = inode->i_private; 1591 struct intel_gt *gt = &i915->gt; 1592 1593 atomic_inc(>->user_wakeref); 1594 intel_gt_pm_get(gt); 1595 if (INTEL_GEN(i915) >= 6) 1596 intel_uncore_forcewake_user_get(gt->uncore); 1597 1598 return 0; 1599 } 1600 1601 static int i915_forcewake_release(struct inode *inode, struct file *file) 1602 { 1603 struct drm_i915_private *i915 = inode->i_private; 1604 struct intel_gt *gt = &i915->gt; 1605 1606 if (INTEL_GEN(i915) >= 6) 1607 intel_uncore_forcewake_user_put(&i915->uncore); 1608 intel_gt_pm_put(gt); 1609 atomic_dec(>->user_wakeref); 1610 1611 return 0; 1612 } 1613 1614 static const struct file_operations i915_forcewake_fops = { 1615 .owner = THIS_MODULE, 1616 .open = i915_forcewake_open, 1617 .release = i915_forcewake_release, 1618 }; 1619 1620 static const struct drm_info_list i915_debugfs_list[] = { 1621 {"i915_capabilities", i915_capabilities, 0}, 1622 {"i915_gem_objects", i915_gem_object_info, 0}, 1623 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 1624 {"i915_gem_interrupt", i915_interrupt_info, 0}, 1625 {"i915_frequency_info", i915_frequency_info, 0}, 1626 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 1627 {"i915_context_status", i915_context_status, 0}, 1628 {"i915_swizzle_info", i915_swizzle_info, 0}, 1629 {"i915_llc", i915_llc, 0}, 1630 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 1631 {"i915_engine_info", i915_engine_info, 0}, 1632 {"i915_shrinker_info", i915_shrinker_info, 0}, 1633 {"i915_wa_registers", i915_wa_registers, 0}, 1634 {"i915_sseu_status", i915_sseu_status, 0}, 1635 {"i915_rps_boost_info", i915_rps_boost_info, 0}, 1636 }; 1637 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 1638 1639 static const struct i915_debugfs_files { 1640 const char *name; 1641 const struct file_operations *fops; 1642 } i915_debugfs_files[] = { 1643 {"i915_perf_noa_delay", &i915_perf_noa_delay_fops}, 1644 {"i915_wedged", &i915_wedged_fops}, 1645 {"i915_cache_sharing", &i915_cache_sharing_fops}, 1646 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 1647 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 1648 {"i915_error_state", &i915_error_state_fops}, 1649 {"i915_gpu_info", &i915_gpu_info_fops}, 1650 #endif 1651 }; 1652 1653 void i915_debugfs_register(struct drm_i915_private *dev_priv) 1654 { 1655 struct drm_minor *minor = dev_priv->drm.primary; 1656 int i; 1657 1658 i915_debugfs_params(dev_priv); 1659 1660 debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root, 1661 to_i915(minor->dev), &i915_forcewake_fops); 1662 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 1663 debugfs_create_file(i915_debugfs_files[i].name, 1664 S_IRUGO | S_IWUSR, 1665 minor->debugfs_root, 1666 to_i915(minor->dev), 1667 i915_debugfs_files[i].fops); 1668 } 1669 1670 drm_debugfs_create_files(i915_debugfs_list, 1671 I915_DEBUGFS_ENTRIES, 1672 minor->debugfs_root, minor); 1673 } 1674