1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/debugfs.h> 30 #include <linux/sort.h> 31 #include "intel_drv.h" 32 33 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) 34 { 35 return to_i915(node->minor->dev); 36 } 37 38 static __always_inline void seq_print_param(struct seq_file *m, 39 const char *name, 40 const char *type, 41 const void *x) 42 { 43 if (!__builtin_strcmp(type, "bool")) 44 seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x)); 45 else if (!__builtin_strcmp(type, "int")) 46 seq_printf(m, "i915.%s=%d\n", name, *(const int *)x); 47 else if (!__builtin_strcmp(type, "unsigned int")) 48 seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x); 49 else if (!__builtin_strcmp(type, "char *")) 50 seq_printf(m, "i915.%s=%s\n", name, *(const char **)x); 51 else 52 BUILD_BUG(); 53 } 54 55 static int i915_capabilities(struct seq_file *m, void *data) 56 { 57 struct drm_i915_private *dev_priv = node_to_i915(m->private); 58 const struct intel_device_info *info = INTEL_INFO(dev_priv); 59 60 seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv)); 61 seq_printf(m, "platform: %s\n", intel_platform_name(info->platform)); 62 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv)); 63 64 #define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x)) 65 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); 66 #undef PRINT_FLAG 67 68 kernel_param_lock(THIS_MODULE); 69 #define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x); 70 I915_PARAMS_FOR_EACH(PRINT_PARAM); 71 #undef PRINT_PARAM 72 kernel_param_unlock(THIS_MODULE); 73 74 return 0; 75 } 76 77 static char get_active_flag(struct drm_i915_gem_object *obj) 78 { 79 return i915_gem_object_is_active(obj) ? '*' : ' '; 80 } 81 82 static char get_pin_flag(struct drm_i915_gem_object *obj) 83 { 84 return obj->pin_display ? 'p' : ' '; 85 } 86 87 static char get_tiling_flag(struct drm_i915_gem_object *obj) 88 { 89 switch (i915_gem_object_get_tiling(obj)) { 90 default: 91 case I915_TILING_NONE: return ' '; 92 case I915_TILING_X: return 'X'; 93 case I915_TILING_Y: return 'Y'; 94 } 95 } 96 97 static char get_global_flag(struct drm_i915_gem_object *obj) 98 { 99 return !list_empty(&obj->userfault_link) ? 'g' : ' '; 100 } 101 102 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) 103 { 104 return obj->mm.mapping ? 'M' : ' '; 105 } 106 107 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) 108 { 109 u64 size = 0; 110 struct i915_vma *vma; 111 112 list_for_each_entry(vma, &obj->vma_list, obj_link) { 113 if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node)) 114 size += vma->node.size; 115 } 116 117 return size; 118 } 119 120 static void 121 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 122 { 123 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 124 struct intel_engine_cs *engine; 125 struct i915_vma *vma; 126 unsigned int frontbuffer_bits; 127 int pin_count = 0; 128 129 lockdep_assert_held(&obj->base.dev->struct_mutex); 130 131 seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s", 132 &obj->base, 133 get_active_flag(obj), 134 get_pin_flag(obj), 135 get_tiling_flag(obj), 136 get_global_flag(obj), 137 get_pin_mapped_flag(obj), 138 obj->base.size / 1024, 139 obj->base.read_domains, 140 obj->base.write_domain, 141 i915_cache_level_str(dev_priv, obj->cache_level), 142 obj->mm.dirty ? " dirty" : "", 143 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : ""); 144 if (obj->base.name) 145 seq_printf(m, " (name: %d)", obj->base.name); 146 list_for_each_entry(vma, &obj->vma_list, obj_link) { 147 if (i915_vma_is_pinned(vma)) 148 pin_count++; 149 } 150 seq_printf(m, " (pinned x %d)", pin_count); 151 if (obj->pin_display) 152 seq_printf(m, " (display)"); 153 list_for_each_entry(vma, &obj->vma_list, obj_link) { 154 if (!drm_mm_node_allocated(&vma->node)) 155 continue; 156 157 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx", 158 i915_vma_is_ggtt(vma) ? "g" : "pp", 159 vma->node.start, vma->node.size); 160 if (i915_vma_is_ggtt(vma)) { 161 switch (vma->ggtt_view.type) { 162 case I915_GGTT_VIEW_NORMAL: 163 seq_puts(m, ", normal"); 164 break; 165 166 case I915_GGTT_VIEW_PARTIAL: 167 seq_printf(m, ", partial [%08llx+%x]", 168 vma->ggtt_view.partial.offset << PAGE_SHIFT, 169 vma->ggtt_view.partial.size << PAGE_SHIFT); 170 break; 171 172 case I915_GGTT_VIEW_ROTATED: 173 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", 174 vma->ggtt_view.rotated.plane[0].width, 175 vma->ggtt_view.rotated.plane[0].height, 176 vma->ggtt_view.rotated.plane[0].stride, 177 vma->ggtt_view.rotated.plane[0].offset, 178 vma->ggtt_view.rotated.plane[1].width, 179 vma->ggtt_view.rotated.plane[1].height, 180 vma->ggtt_view.rotated.plane[1].stride, 181 vma->ggtt_view.rotated.plane[1].offset); 182 break; 183 184 default: 185 MISSING_CASE(vma->ggtt_view.type); 186 break; 187 } 188 } 189 if (vma->fence) 190 seq_printf(m, " , fence: %d%s", 191 vma->fence->id, 192 i915_gem_active_isset(&vma->last_fence) ? "*" : ""); 193 seq_puts(m, ")"); 194 } 195 if (obj->stolen) 196 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 197 198 engine = i915_gem_object_last_write_engine(obj); 199 if (engine) 200 seq_printf(m, " (%s)", engine->name); 201 202 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits); 203 if (frontbuffer_bits) 204 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits); 205 } 206 207 static int obj_rank_by_stolen(const void *A, const void *B) 208 { 209 const struct drm_i915_gem_object *a = 210 *(const struct drm_i915_gem_object **)A; 211 const struct drm_i915_gem_object *b = 212 *(const struct drm_i915_gem_object **)B; 213 214 if (a->stolen->start < b->stolen->start) 215 return -1; 216 if (a->stolen->start > b->stolen->start) 217 return 1; 218 return 0; 219 } 220 221 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 222 { 223 struct drm_i915_private *dev_priv = node_to_i915(m->private); 224 struct drm_device *dev = &dev_priv->drm; 225 struct drm_i915_gem_object **objects; 226 struct drm_i915_gem_object *obj; 227 u64 total_obj_size, total_gtt_size; 228 unsigned long total, count, n; 229 int ret; 230 231 total = READ_ONCE(dev_priv->mm.object_count); 232 objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL); 233 if (!objects) 234 return -ENOMEM; 235 236 ret = mutex_lock_interruptible(&dev->struct_mutex); 237 if (ret) 238 goto out; 239 240 total_obj_size = total_gtt_size = count = 0; 241 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) { 242 if (count == total) 243 break; 244 245 if (obj->stolen == NULL) 246 continue; 247 248 objects[count++] = obj; 249 total_obj_size += obj->base.size; 250 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 251 252 } 253 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) { 254 if (count == total) 255 break; 256 257 if (obj->stolen == NULL) 258 continue; 259 260 objects[count++] = obj; 261 total_obj_size += obj->base.size; 262 } 263 264 sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL); 265 266 seq_puts(m, "Stolen:\n"); 267 for (n = 0; n < count; n++) { 268 seq_puts(m, " "); 269 describe_obj(m, objects[n]); 270 seq_putc(m, '\n'); 271 } 272 seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n", 273 count, total_obj_size, total_gtt_size); 274 275 mutex_unlock(&dev->struct_mutex); 276 out: 277 kvfree(objects); 278 return ret; 279 } 280 281 struct file_stats { 282 struct drm_i915_file_private *file_priv; 283 unsigned long count; 284 u64 total, unbound; 285 u64 global, shared; 286 u64 active, inactive; 287 }; 288 289 static int per_file_stats(int id, void *ptr, void *data) 290 { 291 struct drm_i915_gem_object *obj = ptr; 292 struct file_stats *stats = data; 293 struct i915_vma *vma; 294 295 lockdep_assert_held(&obj->base.dev->struct_mutex); 296 297 stats->count++; 298 stats->total += obj->base.size; 299 if (!obj->bind_count) 300 stats->unbound += obj->base.size; 301 if (obj->base.name || obj->base.dma_buf) 302 stats->shared += obj->base.size; 303 304 list_for_each_entry(vma, &obj->vma_list, obj_link) { 305 if (!drm_mm_node_allocated(&vma->node)) 306 continue; 307 308 if (i915_vma_is_ggtt(vma)) { 309 stats->global += vma->node.size; 310 } else { 311 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm); 312 313 if (ppgtt->base.file != stats->file_priv) 314 continue; 315 } 316 317 if (i915_vma_is_active(vma)) 318 stats->active += vma->node.size; 319 else 320 stats->inactive += vma->node.size; 321 } 322 323 return 0; 324 } 325 326 #define print_file_stats(m, name, stats) do { \ 327 if (stats.count) \ 328 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \ 329 name, \ 330 stats.count, \ 331 stats.total, \ 332 stats.active, \ 333 stats.inactive, \ 334 stats.global, \ 335 stats.shared, \ 336 stats.unbound); \ 337 } while (0) 338 339 static void print_batch_pool_stats(struct seq_file *m, 340 struct drm_i915_private *dev_priv) 341 { 342 struct drm_i915_gem_object *obj; 343 struct file_stats stats; 344 struct intel_engine_cs *engine; 345 enum intel_engine_id id; 346 int j; 347 348 memset(&stats, 0, sizeof(stats)); 349 350 for_each_engine(engine, dev_priv, id) { 351 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { 352 list_for_each_entry(obj, 353 &engine->batch_pool.cache_list[j], 354 batch_pool_link) 355 per_file_stats(0, obj, &stats); 356 } 357 } 358 359 print_file_stats(m, "[k]batch pool", stats); 360 } 361 362 static int per_file_ctx_stats(int id, void *ptr, void *data) 363 { 364 struct i915_gem_context *ctx = ptr; 365 int n; 366 367 for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) { 368 if (ctx->engine[n].state) 369 per_file_stats(0, ctx->engine[n].state->obj, data); 370 if (ctx->engine[n].ring) 371 per_file_stats(0, ctx->engine[n].ring->vma->obj, data); 372 } 373 374 return 0; 375 } 376 377 static void print_context_stats(struct seq_file *m, 378 struct drm_i915_private *dev_priv) 379 { 380 struct drm_device *dev = &dev_priv->drm; 381 struct file_stats stats; 382 struct drm_file *file; 383 384 memset(&stats, 0, sizeof(stats)); 385 386 mutex_lock(&dev->struct_mutex); 387 if (dev_priv->kernel_context) 388 per_file_ctx_stats(0, dev_priv->kernel_context, &stats); 389 390 list_for_each_entry(file, &dev->filelist, lhead) { 391 struct drm_i915_file_private *fpriv = file->driver_priv; 392 idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats); 393 } 394 mutex_unlock(&dev->struct_mutex); 395 396 print_file_stats(m, "[k]contexts", stats); 397 } 398 399 static int i915_gem_object_info(struct seq_file *m, void *data) 400 { 401 struct drm_i915_private *dev_priv = node_to_i915(m->private); 402 struct drm_device *dev = &dev_priv->drm; 403 struct i915_ggtt *ggtt = &dev_priv->ggtt; 404 u32 count, mapped_count, purgeable_count, dpy_count; 405 u64 size, mapped_size, purgeable_size, dpy_size; 406 struct drm_i915_gem_object *obj; 407 struct drm_file *file; 408 int ret; 409 410 ret = mutex_lock_interruptible(&dev->struct_mutex); 411 if (ret) 412 return ret; 413 414 seq_printf(m, "%u objects, %llu bytes\n", 415 dev_priv->mm.object_count, 416 dev_priv->mm.object_memory); 417 418 size = count = 0; 419 mapped_size = mapped_count = 0; 420 purgeable_size = purgeable_count = 0; 421 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) { 422 size += obj->base.size; 423 ++count; 424 425 if (obj->mm.madv == I915_MADV_DONTNEED) { 426 purgeable_size += obj->base.size; 427 ++purgeable_count; 428 } 429 430 if (obj->mm.mapping) { 431 mapped_count++; 432 mapped_size += obj->base.size; 433 } 434 } 435 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); 436 437 size = count = dpy_size = dpy_count = 0; 438 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) { 439 size += obj->base.size; 440 ++count; 441 442 if (obj->pin_display) { 443 dpy_size += obj->base.size; 444 ++dpy_count; 445 } 446 447 if (obj->mm.madv == I915_MADV_DONTNEED) { 448 purgeable_size += obj->base.size; 449 ++purgeable_count; 450 } 451 452 if (obj->mm.mapping) { 453 mapped_count++; 454 mapped_size += obj->base.size; 455 } 456 } 457 seq_printf(m, "%u bound objects, %llu bytes\n", 458 count, size); 459 seq_printf(m, "%u purgeable objects, %llu bytes\n", 460 purgeable_count, purgeable_size); 461 seq_printf(m, "%u mapped objects, %llu bytes\n", 462 mapped_count, mapped_size); 463 seq_printf(m, "%u display objects (pinned), %llu bytes\n", 464 dpy_count, dpy_size); 465 466 seq_printf(m, "%llu [%llu] gtt total\n", 467 ggtt->base.total, ggtt->mappable_end); 468 469 seq_putc(m, '\n'); 470 print_batch_pool_stats(m, dev_priv); 471 mutex_unlock(&dev->struct_mutex); 472 473 mutex_lock(&dev->filelist_mutex); 474 print_context_stats(m, dev_priv); 475 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 476 struct file_stats stats; 477 struct drm_i915_file_private *file_priv = file->driver_priv; 478 struct drm_i915_gem_request *request; 479 struct task_struct *task; 480 481 mutex_lock(&dev->struct_mutex); 482 483 memset(&stats, 0, sizeof(stats)); 484 stats.file_priv = file->driver_priv; 485 spin_lock(&file->table_lock); 486 idr_for_each(&file->object_idr, per_file_stats, &stats); 487 spin_unlock(&file->table_lock); 488 /* 489 * Although we have a valid reference on file->pid, that does 490 * not guarantee that the task_struct who called get_pid() is 491 * still alive (e.g. get_pid(current) => fork() => exit()). 492 * Therefore, we need to protect this ->comm access using RCU. 493 */ 494 request = list_first_entry_or_null(&file_priv->mm.request_list, 495 struct drm_i915_gem_request, 496 client_link); 497 rcu_read_lock(); 498 task = pid_task(request && request->ctx->pid ? 499 request->ctx->pid : file->pid, 500 PIDTYPE_PID); 501 print_file_stats(m, task ? task->comm : "<unknown>", stats); 502 rcu_read_unlock(); 503 504 mutex_unlock(&dev->struct_mutex); 505 } 506 mutex_unlock(&dev->filelist_mutex); 507 508 return 0; 509 } 510 511 static int i915_gem_gtt_info(struct seq_file *m, void *data) 512 { 513 struct drm_info_node *node = m->private; 514 struct drm_i915_private *dev_priv = node_to_i915(node); 515 struct drm_device *dev = &dev_priv->drm; 516 bool show_pin_display_only = !!node->info_ent->data; 517 struct drm_i915_gem_object *obj; 518 u64 total_obj_size, total_gtt_size; 519 int count, ret; 520 521 ret = mutex_lock_interruptible(&dev->struct_mutex); 522 if (ret) 523 return ret; 524 525 total_obj_size = total_gtt_size = count = 0; 526 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) { 527 if (show_pin_display_only && !obj->pin_display) 528 continue; 529 530 seq_puts(m, " "); 531 describe_obj(m, obj); 532 seq_putc(m, '\n'); 533 total_obj_size += obj->base.size; 534 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 535 count++; 536 } 537 538 mutex_unlock(&dev->struct_mutex); 539 540 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 541 count, total_obj_size, total_gtt_size); 542 543 return 0; 544 } 545 546 static int i915_gem_batch_pool_info(struct seq_file *m, void *data) 547 { 548 struct drm_i915_private *dev_priv = node_to_i915(m->private); 549 struct drm_device *dev = &dev_priv->drm; 550 struct drm_i915_gem_object *obj; 551 struct intel_engine_cs *engine; 552 enum intel_engine_id id; 553 int total = 0; 554 int ret, j; 555 556 ret = mutex_lock_interruptible(&dev->struct_mutex); 557 if (ret) 558 return ret; 559 560 for_each_engine(engine, dev_priv, id) { 561 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { 562 int count; 563 564 count = 0; 565 list_for_each_entry(obj, 566 &engine->batch_pool.cache_list[j], 567 batch_pool_link) 568 count++; 569 seq_printf(m, "%s cache[%d]: %d objects\n", 570 engine->name, j, count); 571 572 list_for_each_entry(obj, 573 &engine->batch_pool.cache_list[j], 574 batch_pool_link) { 575 seq_puts(m, " "); 576 describe_obj(m, obj); 577 seq_putc(m, '\n'); 578 } 579 580 total += count; 581 } 582 } 583 584 seq_printf(m, "total: %d\n", total); 585 586 mutex_unlock(&dev->struct_mutex); 587 588 return 0; 589 } 590 591 static void print_request(struct seq_file *m, 592 struct drm_i915_gem_request *rq, 593 const char *prefix) 594 { 595 seq_printf(m, "%s%x [%x:%x] prio=%d @ %dms: %s\n", prefix, 596 rq->global_seqno, rq->ctx->hw_id, rq->fence.seqno, 597 rq->priotree.priority, 598 jiffies_to_msecs(jiffies - rq->emitted_jiffies), 599 rq->timeline->common->name); 600 } 601 602 static int i915_gem_request_info(struct seq_file *m, void *data) 603 { 604 struct drm_i915_private *dev_priv = node_to_i915(m->private); 605 struct drm_device *dev = &dev_priv->drm; 606 struct drm_i915_gem_request *req; 607 struct intel_engine_cs *engine; 608 enum intel_engine_id id; 609 int ret, any; 610 611 ret = mutex_lock_interruptible(&dev->struct_mutex); 612 if (ret) 613 return ret; 614 615 any = 0; 616 for_each_engine(engine, dev_priv, id) { 617 int count; 618 619 count = 0; 620 list_for_each_entry(req, &engine->timeline->requests, link) 621 count++; 622 if (count == 0) 623 continue; 624 625 seq_printf(m, "%s requests: %d\n", engine->name, count); 626 list_for_each_entry(req, &engine->timeline->requests, link) 627 print_request(m, req, " "); 628 629 any++; 630 } 631 mutex_unlock(&dev->struct_mutex); 632 633 if (any == 0) 634 seq_puts(m, "No requests\n"); 635 636 return 0; 637 } 638 639 static void i915_ring_seqno_info(struct seq_file *m, 640 struct intel_engine_cs *engine) 641 { 642 struct intel_breadcrumbs *b = &engine->breadcrumbs; 643 struct rb_node *rb; 644 645 seq_printf(m, "Current sequence (%s): %x\n", 646 engine->name, intel_engine_get_seqno(engine)); 647 648 spin_lock_irq(&b->rb_lock); 649 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { 650 struct intel_wait *w = rb_entry(rb, typeof(*w), node); 651 652 seq_printf(m, "Waiting (%s): %s [%d] on %x\n", 653 engine->name, w->tsk->comm, w->tsk->pid, w->seqno); 654 } 655 spin_unlock_irq(&b->rb_lock); 656 } 657 658 static int i915_gem_seqno_info(struct seq_file *m, void *data) 659 { 660 struct drm_i915_private *dev_priv = node_to_i915(m->private); 661 struct intel_engine_cs *engine; 662 enum intel_engine_id id; 663 664 for_each_engine(engine, dev_priv, id) 665 i915_ring_seqno_info(m, engine); 666 667 return 0; 668 } 669 670 671 static int i915_interrupt_info(struct seq_file *m, void *data) 672 { 673 struct drm_i915_private *dev_priv = node_to_i915(m->private); 674 struct intel_engine_cs *engine; 675 enum intel_engine_id id; 676 int i, pipe; 677 678 intel_runtime_pm_get(dev_priv); 679 680 if (IS_CHERRYVIEW(dev_priv)) { 681 seq_printf(m, "Master Interrupt Control:\t%08x\n", 682 I915_READ(GEN8_MASTER_IRQ)); 683 684 seq_printf(m, "Display IER:\t%08x\n", 685 I915_READ(VLV_IER)); 686 seq_printf(m, "Display IIR:\t%08x\n", 687 I915_READ(VLV_IIR)); 688 seq_printf(m, "Display IIR_RW:\t%08x\n", 689 I915_READ(VLV_IIR_RW)); 690 seq_printf(m, "Display IMR:\t%08x\n", 691 I915_READ(VLV_IMR)); 692 for_each_pipe(dev_priv, pipe) { 693 enum intel_display_power_domain power_domain; 694 695 power_domain = POWER_DOMAIN_PIPE(pipe); 696 if (!intel_display_power_get_if_enabled(dev_priv, 697 power_domain)) { 698 seq_printf(m, "Pipe %c power disabled\n", 699 pipe_name(pipe)); 700 continue; 701 } 702 703 seq_printf(m, "Pipe %c stat:\t%08x\n", 704 pipe_name(pipe), 705 I915_READ(PIPESTAT(pipe))); 706 707 intel_display_power_put(dev_priv, power_domain); 708 } 709 710 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 711 seq_printf(m, "Port hotplug:\t%08x\n", 712 I915_READ(PORT_HOTPLUG_EN)); 713 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 714 I915_READ(VLV_DPFLIPSTAT)); 715 seq_printf(m, "DPINVGTT:\t%08x\n", 716 I915_READ(DPINVGTT)); 717 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 718 719 for (i = 0; i < 4; i++) { 720 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 721 i, I915_READ(GEN8_GT_IMR(i))); 722 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 723 i, I915_READ(GEN8_GT_IIR(i))); 724 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 725 i, I915_READ(GEN8_GT_IER(i))); 726 } 727 728 seq_printf(m, "PCU interrupt mask:\t%08x\n", 729 I915_READ(GEN8_PCU_IMR)); 730 seq_printf(m, "PCU interrupt identity:\t%08x\n", 731 I915_READ(GEN8_PCU_IIR)); 732 seq_printf(m, "PCU interrupt enable:\t%08x\n", 733 I915_READ(GEN8_PCU_IER)); 734 } else if (INTEL_GEN(dev_priv) >= 8) { 735 seq_printf(m, "Master Interrupt Control:\t%08x\n", 736 I915_READ(GEN8_MASTER_IRQ)); 737 738 for (i = 0; i < 4; i++) { 739 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 740 i, I915_READ(GEN8_GT_IMR(i))); 741 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 742 i, I915_READ(GEN8_GT_IIR(i))); 743 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 744 i, I915_READ(GEN8_GT_IER(i))); 745 } 746 747 for_each_pipe(dev_priv, pipe) { 748 enum intel_display_power_domain power_domain; 749 750 power_domain = POWER_DOMAIN_PIPE(pipe); 751 if (!intel_display_power_get_if_enabled(dev_priv, 752 power_domain)) { 753 seq_printf(m, "Pipe %c power disabled\n", 754 pipe_name(pipe)); 755 continue; 756 } 757 seq_printf(m, "Pipe %c IMR:\t%08x\n", 758 pipe_name(pipe), 759 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 760 seq_printf(m, "Pipe %c IIR:\t%08x\n", 761 pipe_name(pipe), 762 I915_READ(GEN8_DE_PIPE_IIR(pipe))); 763 seq_printf(m, "Pipe %c IER:\t%08x\n", 764 pipe_name(pipe), 765 I915_READ(GEN8_DE_PIPE_IER(pipe))); 766 767 intel_display_power_put(dev_priv, power_domain); 768 } 769 770 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 771 I915_READ(GEN8_DE_PORT_IMR)); 772 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 773 I915_READ(GEN8_DE_PORT_IIR)); 774 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 775 I915_READ(GEN8_DE_PORT_IER)); 776 777 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 778 I915_READ(GEN8_DE_MISC_IMR)); 779 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 780 I915_READ(GEN8_DE_MISC_IIR)); 781 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 782 I915_READ(GEN8_DE_MISC_IER)); 783 784 seq_printf(m, "PCU interrupt mask:\t%08x\n", 785 I915_READ(GEN8_PCU_IMR)); 786 seq_printf(m, "PCU interrupt identity:\t%08x\n", 787 I915_READ(GEN8_PCU_IIR)); 788 seq_printf(m, "PCU interrupt enable:\t%08x\n", 789 I915_READ(GEN8_PCU_IER)); 790 } else if (IS_VALLEYVIEW(dev_priv)) { 791 seq_printf(m, "Display IER:\t%08x\n", 792 I915_READ(VLV_IER)); 793 seq_printf(m, "Display IIR:\t%08x\n", 794 I915_READ(VLV_IIR)); 795 seq_printf(m, "Display IIR_RW:\t%08x\n", 796 I915_READ(VLV_IIR_RW)); 797 seq_printf(m, "Display IMR:\t%08x\n", 798 I915_READ(VLV_IMR)); 799 for_each_pipe(dev_priv, pipe) { 800 enum intel_display_power_domain power_domain; 801 802 power_domain = POWER_DOMAIN_PIPE(pipe); 803 if (!intel_display_power_get_if_enabled(dev_priv, 804 power_domain)) { 805 seq_printf(m, "Pipe %c power disabled\n", 806 pipe_name(pipe)); 807 continue; 808 } 809 810 seq_printf(m, "Pipe %c stat:\t%08x\n", 811 pipe_name(pipe), 812 I915_READ(PIPESTAT(pipe))); 813 intel_display_power_put(dev_priv, power_domain); 814 } 815 816 seq_printf(m, "Master IER:\t%08x\n", 817 I915_READ(VLV_MASTER_IER)); 818 819 seq_printf(m, "Render IER:\t%08x\n", 820 I915_READ(GTIER)); 821 seq_printf(m, "Render IIR:\t%08x\n", 822 I915_READ(GTIIR)); 823 seq_printf(m, "Render IMR:\t%08x\n", 824 I915_READ(GTIMR)); 825 826 seq_printf(m, "PM IER:\t\t%08x\n", 827 I915_READ(GEN6_PMIER)); 828 seq_printf(m, "PM IIR:\t\t%08x\n", 829 I915_READ(GEN6_PMIIR)); 830 seq_printf(m, "PM IMR:\t\t%08x\n", 831 I915_READ(GEN6_PMIMR)); 832 833 seq_printf(m, "Port hotplug:\t%08x\n", 834 I915_READ(PORT_HOTPLUG_EN)); 835 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 836 I915_READ(VLV_DPFLIPSTAT)); 837 seq_printf(m, "DPINVGTT:\t%08x\n", 838 I915_READ(DPINVGTT)); 839 840 } else if (!HAS_PCH_SPLIT(dev_priv)) { 841 seq_printf(m, "Interrupt enable: %08x\n", 842 I915_READ(IER)); 843 seq_printf(m, "Interrupt identity: %08x\n", 844 I915_READ(IIR)); 845 seq_printf(m, "Interrupt mask: %08x\n", 846 I915_READ(IMR)); 847 for_each_pipe(dev_priv, pipe) 848 seq_printf(m, "Pipe %c stat: %08x\n", 849 pipe_name(pipe), 850 I915_READ(PIPESTAT(pipe))); 851 } else { 852 seq_printf(m, "North Display Interrupt enable: %08x\n", 853 I915_READ(DEIER)); 854 seq_printf(m, "North Display Interrupt identity: %08x\n", 855 I915_READ(DEIIR)); 856 seq_printf(m, "North Display Interrupt mask: %08x\n", 857 I915_READ(DEIMR)); 858 seq_printf(m, "South Display Interrupt enable: %08x\n", 859 I915_READ(SDEIER)); 860 seq_printf(m, "South Display Interrupt identity: %08x\n", 861 I915_READ(SDEIIR)); 862 seq_printf(m, "South Display Interrupt mask: %08x\n", 863 I915_READ(SDEIMR)); 864 seq_printf(m, "Graphics Interrupt enable: %08x\n", 865 I915_READ(GTIER)); 866 seq_printf(m, "Graphics Interrupt identity: %08x\n", 867 I915_READ(GTIIR)); 868 seq_printf(m, "Graphics Interrupt mask: %08x\n", 869 I915_READ(GTIMR)); 870 } 871 for_each_engine(engine, dev_priv, id) { 872 if (INTEL_GEN(dev_priv) >= 6) { 873 seq_printf(m, 874 "Graphics Interrupt mask (%s): %08x\n", 875 engine->name, I915_READ_IMR(engine)); 876 } 877 i915_ring_seqno_info(m, engine); 878 } 879 intel_runtime_pm_put(dev_priv); 880 881 return 0; 882 } 883 884 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 885 { 886 struct drm_i915_private *dev_priv = node_to_i915(m->private); 887 struct drm_device *dev = &dev_priv->drm; 888 int i, ret; 889 890 ret = mutex_lock_interruptible(&dev->struct_mutex); 891 if (ret) 892 return ret; 893 894 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 895 for (i = 0; i < dev_priv->num_fence_regs; i++) { 896 struct i915_vma *vma = dev_priv->fence_regs[i].vma; 897 898 seq_printf(m, "Fence %d, pin count = %d, object = ", 899 i, dev_priv->fence_regs[i].pin_count); 900 if (!vma) 901 seq_puts(m, "unused"); 902 else 903 describe_obj(m, vma->obj); 904 seq_putc(m, '\n'); 905 } 906 907 mutex_unlock(&dev->struct_mutex); 908 return 0; 909 } 910 911 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 912 static ssize_t gpu_state_read(struct file *file, char __user *ubuf, 913 size_t count, loff_t *pos) 914 { 915 struct i915_gpu_state *error = file->private_data; 916 struct drm_i915_error_state_buf str; 917 ssize_t ret; 918 loff_t tmp; 919 920 if (!error) 921 return 0; 922 923 ret = i915_error_state_buf_init(&str, error->i915, count, *pos); 924 if (ret) 925 return ret; 926 927 ret = i915_error_state_to_str(&str, error); 928 if (ret) 929 goto out; 930 931 tmp = 0; 932 ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes); 933 if (ret < 0) 934 goto out; 935 936 *pos = str.start + ret; 937 out: 938 i915_error_state_buf_release(&str); 939 return ret; 940 } 941 942 static int gpu_state_release(struct inode *inode, struct file *file) 943 { 944 i915_gpu_state_put(file->private_data); 945 return 0; 946 } 947 948 static int i915_gpu_info_open(struct inode *inode, struct file *file) 949 { 950 struct drm_i915_private *i915 = inode->i_private; 951 struct i915_gpu_state *gpu; 952 953 intel_runtime_pm_get(i915); 954 gpu = i915_capture_gpu_state(i915); 955 intel_runtime_pm_put(i915); 956 if (!gpu) 957 return -ENOMEM; 958 959 file->private_data = gpu; 960 return 0; 961 } 962 963 static const struct file_operations i915_gpu_info_fops = { 964 .owner = THIS_MODULE, 965 .open = i915_gpu_info_open, 966 .read = gpu_state_read, 967 .llseek = default_llseek, 968 .release = gpu_state_release, 969 }; 970 971 static ssize_t 972 i915_error_state_write(struct file *filp, 973 const char __user *ubuf, 974 size_t cnt, 975 loff_t *ppos) 976 { 977 struct i915_gpu_state *error = filp->private_data; 978 979 if (!error) 980 return 0; 981 982 DRM_DEBUG_DRIVER("Resetting error state\n"); 983 i915_reset_error_state(error->i915); 984 985 return cnt; 986 } 987 988 static int i915_error_state_open(struct inode *inode, struct file *file) 989 { 990 file->private_data = i915_first_error_state(inode->i_private); 991 return 0; 992 } 993 994 static const struct file_operations i915_error_state_fops = { 995 .owner = THIS_MODULE, 996 .open = i915_error_state_open, 997 .read = gpu_state_read, 998 .write = i915_error_state_write, 999 .llseek = default_llseek, 1000 .release = gpu_state_release, 1001 }; 1002 #endif 1003 1004 static int 1005 i915_next_seqno_set(void *data, u64 val) 1006 { 1007 struct drm_i915_private *dev_priv = data; 1008 struct drm_device *dev = &dev_priv->drm; 1009 int ret; 1010 1011 ret = mutex_lock_interruptible(&dev->struct_mutex); 1012 if (ret) 1013 return ret; 1014 1015 ret = i915_gem_set_global_seqno(dev, val); 1016 mutex_unlock(&dev->struct_mutex); 1017 1018 return ret; 1019 } 1020 1021 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 1022 NULL, i915_next_seqno_set, 1023 "0x%llx\n"); 1024 1025 static int i915_frequency_info(struct seq_file *m, void *unused) 1026 { 1027 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1028 int ret = 0; 1029 1030 intel_runtime_pm_get(dev_priv); 1031 1032 if (IS_GEN5(dev_priv)) { 1033 u16 rgvswctl = I915_READ16(MEMSWCTL); 1034 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 1035 1036 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 1037 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 1038 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 1039 MEMSTAT_VID_SHIFT); 1040 seq_printf(m, "Current P-state: %d\n", 1041 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 1042 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1043 u32 freq_sts; 1044 1045 mutex_lock(&dev_priv->rps.hw_lock); 1046 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1047 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1048 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1049 1050 seq_printf(m, "actual GPU freq: %d MHz\n", 1051 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); 1052 1053 seq_printf(m, "current GPU freq: %d MHz\n", 1054 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 1055 1056 seq_printf(m, "max GPU freq: %d MHz\n", 1057 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1058 1059 seq_printf(m, "min GPU freq: %d MHz\n", 1060 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1061 1062 seq_printf(m, "idle GPU freq: %d MHz\n", 1063 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); 1064 1065 seq_printf(m, 1066 "efficient (RPe) frequency: %d MHz\n", 1067 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1068 mutex_unlock(&dev_priv->rps.hw_lock); 1069 } else if (INTEL_GEN(dev_priv) >= 6) { 1070 u32 rp_state_limits; 1071 u32 gt_perf_status; 1072 u32 rp_state_cap; 1073 u32 rpmodectl, rpinclimit, rpdeclimit; 1074 u32 rpstat, cagf, reqf; 1075 u32 rpupei, rpcurup, rpprevup; 1076 u32 rpdownei, rpcurdown, rpprevdown; 1077 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 1078 int max_freq; 1079 1080 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 1081 if (IS_GEN9_LP(dev_priv)) { 1082 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 1083 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS); 1084 } else { 1085 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 1086 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 1087 } 1088 1089 /* RPSTAT1 is in the GT power well */ 1090 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1091 1092 reqf = I915_READ(GEN6_RPNSWREQ); 1093 if (INTEL_GEN(dev_priv) >= 9) 1094 reqf >>= 23; 1095 else { 1096 reqf &= ~GEN6_TURBO_DISABLE; 1097 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1098 reqf >>= 24; 1099 else 1100 reqf >>= 25; 1101 } 1102 reqf = intel_gpu_freq(dev_priv, reqf); 1103 1104 rpmodectl = I915_READ(GEN6_RP_CONTROL); 1105 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); 1106 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); 1107 1108 rpstat = I915_READ(GEN6_RPSTAT1); 1109 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; 1110 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; 1111 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; 1112 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; 1113 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; 1114 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; 1115 if (INTEL_GEN(dev_priv) >= 9) 1116 cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; 1117 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1118 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; 1119 else 1120 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; 1121 cagf = intel_gpu_freq(dev_priv, cagf); 1122 1123 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1124 1125 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { 1126 pm_ier = I915_READ(GEN6_PMIER); 1127 pm_imr = I915_READ(GEN6_PMIMR); 1128 pm_isr = I915_READ(GEN6_PMISR); 1129 pm_iir = I915_READ(GEN6_PMIIR); 1130 pm_mask = I915_READ(GEN6_PMINTRMSK); 1131 } else { 1132 pm_ier = I915_READ(GEN8_GT_IER(2)); 1133 pm_imr = I915_READ(GEN8_GT_IMR(2)); 1134 pm_isr = I915_READ(GEN8_GT_ISR(2)); 1135 pm_iir = I915_READ(GEN8_GT_IIR(2)); 1136 pm_mask = I915_READ(GEN6_PMINTRMSK); 1137 } 1138 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1139 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); 1140 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n", 1141 dev_priv->rps.pm_intrmsk_mbz); 1142 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1143 seq_printf(m, "Render p-state ratio: %d\n", 1144 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8); 1145 seq_printf(m, "Render p-state VID: %d\n", 1146 gt_perf_status & 0xff); 1147 seq_printf(m, "Render p-state limit: %d\n", 1148 rp_state_limits & 0xff); 1149 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 1150 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); 1151 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); 1152 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 1153 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 1154 seq_printf(m, "CAGF: %dMHz\n", cagf); 1155 seq_printf(m, "RP CUR UP EI: %d (%dus)\n", 1156 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei)); 1157 seq_printf(m, "RP CUR UP: %d (%dus)\n", 1158 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup)); 1159 seq_printf(m, "RP PREV UP: %d (%dus)\n", 1160 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup)); 1161 seq_printf(m, "Up threshold: %d%%\n", 1162 dev_priv->rps.up_threshold); 1163 1164 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n", 1165 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei)); 1166 seq_printf(m, "RP CUR DOWN: %d (%dus)\n", 1167 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown)); 1168 seq_printf(m, "RP PREV DOWN: %d (%dus)\n", 1169 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown)); 1170 seq_printf(m, "Down threshold: %d%%\n", 1171 dev_priv->rps.down_threshold); 1172 1173 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 : 1174 rp_state_cap >> 16) & 0xff; 1175 max_freq *= (IS_GEN9_BC(dev_priv) || 1176 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1); 1177 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1178 intel_gpu_freq(dev_priv, max_freq)); 1179 1180 max_freq = (rp_state_cap & 0xff00) >> 8; 1181 max_freq *= (IS_GEN9_BC(dev_priv) || 1182 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1); 1183 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1184 intel_gpu_freq(dev_priv, max_freq)); 1185 1186 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 : 1187 rp_state_cap >> 0) & 0xff; 1188 max_freq *= (IS_GEN9_BC(dev_priv) || 1189 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1); 1190 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1191 intel_gpu_freq(dev_priv, max_freq)); 1192 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1193 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1194 1195 seq_printf(m, "Current freq: %d MHz\n", 1196 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 1197 seq_printf(m, "Actual freq: %d MHz\n", cagf); 1198 seq_printf(m, "Idle freq: %d MHz\n", 1199 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); 1200 seq_printf(m, "Min freq: %d MHz\n", 1201 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); 1202 seq_printf(m, "Boost freq: %d MHz\n", 1203 intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq)); 1204 seq_printf(m, "Max freq: %d MHz\n", 1205 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 1206 seq_printf(m, 1207 "efficient (RPe) frequency: %d MHz\n", 1208 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); 1209 } else { 1210 seq_puts(m, "no P-state info available\n"); 1211 } 1212 1213 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk); 1214 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); 1215 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); 1216 1217 intel_runtime_pm_put(dev_priv); 1218 return ret; 1219 } 1220 1221 static void i915_instdone_info(struct drm_i915_private *dev_priv, 1222 struct seq_file *m, 1223 struct intel_instdone *instdone) 1224 { 1225 int slice; 1226 int subslice; 1227 1228 seq_printf(m, "\t\tINSTDONE: 0x%08x\n", 1229 instdone->instdone); 1230 1231 if (INTEL_GEN(dev_priv) <= 3) 1232 return; 1233 1234 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n", 1235 instdone->slice_common); 1236 1237 if (INTEL_GEN(dev_priv) <= 6) 1238 return; 1239 1240 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 1241 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 1242 slice, subslice, instdone->sampler[slice][subslice]); 1243 1244 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 1245 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n", 1246 slice, subslice, instdone->row[slice][subslice]); 1247 } 1248 1249 static int i915_hangcheck_info(struct seq_file *m, void *unused) 1250 { 1251 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1252 struct intel_engine_cs *engine; 1253 u64 acthd[I915_NUM_ENGINES]; 1254 u32 seqno[I915_NUM_ENGINES]; 1255 struct intel_instdone instdone; 1256 enum intel_engine_id id; 1257 1258 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 1259 seq_puts(m, "Wedged\n"); 1260 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) 1261 seq_puts(m, "Reset in progress: struct_mutex backoff\n"); 1262 if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags)) 1263 seq_puts(m, "Reset in progress: reset handoff to waiter\n"); 1264 if (waitqueue_active(&dev_priv->gpu_error.wait_queue)) 1265 seq_puts(m, "Waiter holding struct mutex\n"); 1266 if (waitqueue_active(&dev_priv->gpu_error.reset_queue)) 1267 seq_puts(m, "struct_mutex blocked for reset\n"); 1268 1269 if (!i915.enable_hangcheck) { 1270 seq_puts(m, "Hangcheck disabled\n"); 1271 return 0; 1272 } 1273 1274 intel_runtime_pm_get(dev_priv); 1275 1276 for_each_engine(engine, dev_priv, id) { 1277 acthd[id] = intel_engine_get_active_head(engine); 1278 seqno[id] = intel_engine_get_seqno(engine); 1279 } 1280 1281 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); 1282 1283 intel_runtime_pm_put(dev_priv); 1284 1285 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) 1286 seq_printf(m, "Hangcheck active, timer fires in %dms\n", 1287 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - 1288 jiffies)); 1289 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) 1290 seq_puts(m, "Hangcheck active, work pending\n"); 1291 else 1292 seq_puts(m, "Hangcheck inactive\n"); 1293 1294 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake)); 1295 1296 for_each_engine(engine, dev_priv, id) { 1297 struct intel_breadcrumbs *b = &engine->breadcrumbs; 1298 struct rb_node *rb; 1299 1300 seq_printf(m, "%s:\n", engine->name); 1301 seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n", 1302 engine->hangcheck.seqno, seqno[id], 1303 intel_engine_last_submit(engine), 1304 engine->timeline->inflight_seqnos); 1305 seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n", 1306 yesno(intel_engine_has_waiter(engine)), 1307 yesno(test_bit(engine->id, 1308 &dev_priv->gpu_error.missed_irq_rings)), 1309 yesno(engine->hangcheck.stalled)); 1310 1311 spin_lock_irq(&b->rb_lock); 1312 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { 1313 struct intel_wait *w = rb_entry(rb, typeof(*w), node); 1314 1315 seq_printf(m, "\t%s [%d] waiting for %x\n", 1316 w->tsk->comm, w->tsk->pid, w->seqno); 1317 } 1318 spin_unlock_irq(&b->rb_lock); 1319 1320 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", 1321 (long long)engine->hangcheck.acthd, 1322 (long long)acthd[id]); 1323 seq_printf(m, "\taction = %s(%d) %d ms ago\n", 1324 hangcheck_action_to_str(engine->hangcheck.action), 1325 engine->hangcheck.action, 1326 jiffies_to_msecs(jiffies - 1327 engine->hangcheck.action_timestamp)); 1328 1329 if (engine->id == RCS) { 1330 seq_puts(m, "\tinstdone read =\n"); 1331 1332 i915_instdone_info(dev_priv, m, &instdone); 1333 1334 seq_puts(m, "\tinstdone accu =\n"); 1335 1336 i915_instdone_info(dev_priv, m, 1337 &engine->hangcheck.instdone); 1338 } 1339 } 1340 1341 return 0; 1342 } 1343 1344 static int i915_reset_info(struct seq_file *m, void *unused) 1345 { 1346 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1347 struct i915_gpu_error *error = &dev_priv->gpu_error; 1348 struct intel_engine_cs *engine; 1349 enum intel_engine_id id; 1350 1351 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error)); 1352 1353 for_each_engine(engine, dev_priv, id) { 1354 seq_printf(m, "%s = %u\n", engine->name, 1355 i915_reset_engine_count(error, engine)); 1356 } 1357 1358 return 0; 1359 } 1360 1361 static int ironlake_drpc_info(struct seq_file *m) 1362 { 1363 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1364 u32 rgvmodectl, rstdbyctl; 1365 u16 crstandvid; 1366 1367 rgvmodectl = I915_READ(MEMMODECTL); 1368 rstdbyctl = I915_READ(RSTDBYCTL); 1369 crstandvid = I915_READ16(CRSTANDVID); 1370 1371 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN)); 1372 seq_printf(m, "Boost freq: %d\n", 1373 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1374 MEMMODE_BOOST_FREQ_SHIFT); 1375 seq_printf(m, "HW control enabled: %s\n", 1376 yesno(rgvmodectl & MEMMODE_HWIDLE_EN)); 1377 seq_printf(m, "SW control enabled: %s\n", 1378 yesno(rgvmodectl & MEMMODE_SWMODE_EN)); 1379 seq_printf(m, "Gated voltage change: %s\n", 1380 yesno(rgvmodectl & MEMMODE_RCLK_GATE)); 1381 seq_printf(m, "Starting frequency: P%d\n", 1382 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1383 seq_printf(m, "Max P-state: P%d\n", 1384 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1385 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1386 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1387 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1388 seq_printf(m, "Render standby enabled: %s\n", 1389 yesno(!(rstdbyctl & RCX_SW_EXIT))); 1390 seq_puts(m, "Current RS state: "); 1391 switch (rstdbyctl & RSX_STATUS_MASK) { 1392 case RSX_STATUS_ON: 1393 seq_puts(m, "on\n"); 1394 break; 1395 case RSX_STATUS_RC1: 1396 seq_puts(m, "RC1\n"); 1397 break; 1398 case RSX_STATUS_RC1E: 1399 seq_puts(m, "RC1E\n"); 1400 break; 1401 case RSX_STATUS_RS1: 1402 seq_puts(m, "RS1\n"); 1403 break; 1404 case RSX_STATUS_RS2: 1405 seq_puts(m, "RS2 (RC6)\n"); 1406 break; 1407 case RSX_STATUS_RS3: 1408 seq_puts(m, "RC3 (RC6+)\n"); 1409 break; 1410 default: 1411 seq_puts(m, "unknown\n"); 1412 break; 1413 } 1414 1415 return 0; 1416 } 1417 1418 static int i915_forcewake_domains(struct seq_file *m, void *data) 1419 { 1420 struct drm_i915_private *i915 = node_to_i915(m->private); 1421 struct intel_uncore_forcewake_domain *fw_domain; 1422 unsigned int tmp; 1423 1424 for_each_fw_domain(fw_domain, i915, tmp) 1425 seq_printf(m, "%s.wake_count = %u\n", 1426 intel_uncore_forcewake_domain_to_str(fw_domain->id), 1427 READ_ONCE(fw_domain->wake_count)); 1428 1429 return 0; 1430 } 1431 1432 static void print_rc6_res(struct seq_file *m, 1433 const char *title, 1434 const i915_reg_t reg) 1435 { 1436 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1437 1438 seq_printf(m, "%s %u (%llu us)\n", 1439 title, I915_READ(reg), 1440 intel_rc6_residency_us(dev_priv, reg)); 1441 } 1442 1443 static int vlv_drpc_info(struct seq_file *m) 1444 { 1445 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1446 u32 rpmodectl1, rcctl1, pw_status; 1447 1448 pw_status = I915_READ(VLV_GTLC_PW_STATUS); 1449 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1450 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1451 1452 seq_printf(m, "Video Turbo Mode: %s\n", 1453 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1454 seq_printf(m, "Turbo enabled: %s\n", 1455 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1456 seq_printf(m, "HW control enabled: %s\n", 1457 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1458 seq_printf(m, "SW control enabled: %s\n", 1459 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1460 GEN6_RP_MEDIA_SW_MODE)); 1461 seq_printf(m, "RC6 Enabled: %s\n", 1462 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | 1463 GEN6_RC_CTL_EI_MODE(1)))); 1464 seq_printf(m, "Render Power Well: %s\n", 1465 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); 1466 seq_printf(m, "Media Power Well: %s\n", 1467 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); 1468 1469 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6); 1470 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6); 1471 1472 return i915_forcewake_domains(m, NULL); 1473 } 1474 1475 static int gen6_drpc_info(struct seq_file *m) 1476 { 1477 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1478 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1479 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0; 1480 unsigned forcewake_count; 1481 int count = 0; 1482 1483 forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count); 1484 if (forcewake_count) { 1485 seq_puts(m, "RC information inaccurate because somebody " 1486 "holds a forcewake reference \n"); 1487 } else { 1488 /* NB: we cannot use forcewake, else we read the wrong values */ 1489 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1490 udelay(10); 1491 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1492 } 1493 1494 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS); 1495 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1496 1497 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1498 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1499 if (INTEL_GEN(dev_priv) >= 9) { 1500 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE); 1501 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); 1502 } 1503 1504 mutex_lock(&dev_priv->rps.hw_lock); 1505 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1506 mutex_unlock(&dev_priv->rps.hw_lock); 1507 1508 seq_printf(m, "Video Turbo Mode: %s\n", 1509 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1510 seq_printf(m, "HW control enabled: %s\n", 1511 yesno(rpmodectl1 & GEN6_RP_ENABLE)); 1512 seq_printf(m, "SW control enabled: %s\n", 1513 yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) == 1514 GEN6_RP_MEDIA_SW_MODE)); 1515 seq_printf(m, "RC1e Enabled: %s\n", 1516 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1517 seq_printf(m, "RC6 Enabled: %s\n", 1518 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1519 if (INTEL_GEN(dev_priv) >= 9) { 1520 seq_printf(m, "Render Well Gating Enabled: %s\n", 1521 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE)); 1522 seq_printf(m, "Media Well Gating Enabled: %s\n", 1523 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE)); 1524 } 1525 seq_printf(m, "Deep RC6 Enabled: %s\n", 1526 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1527 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1528 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1529 seq_puts(m, "Current RC state: "); 1530 switch (gt_core_status & GEN6_RCn_MASK) { 1531 case GEN6_RC0: 1532 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1533 seq_puts(m, "Core Power Down\n"); 1534 else 1535 seq_puts(m, "on\n"); 1536 break; 1537 case GEN6_RC3: 1538 seq_puts(m, "RC3\n"); 1539 break; 1540 case GEN6_RC6: 1541 seq_puts(m, "RC6\n"); 1542 break; 1543 case GEN6_RC7: 1544 seq_puts(m, "RC7\n"); 1545 break; 1546 default: 1547 seq_puts(m, "Unknown\n"); 1548 break; 1549 } 1550 1551 seq_printf(m, "Core Power Down: %s\n", 1552 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1553 if (INTEL_GEN(dev_priv) >= 9) { 1554 seq_printf(m, "Render Power Well: %s\n", 1555 (gen9_powergate_status & 1556 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down"); 1557 seq_printf(m, "Media Power Well: %s\n", 1558 (gen9_powergate_status & 1559 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down"); 1560 } 1561 1562 /* Not exactly sure what this is */ 1563 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:", 1564 GEN6_GT_GFX_RC6_LOCKED); 1565 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6); 1566 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p); 1567 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp); 1568 1569 seq_printf(m, "RC6 voltage: %dmV\n", 1570 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1571 seq_printf(m, "RC6+ voltage: %dmV\n", 1572 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1573 seq_printf(m, "RC6++ voltage: %dmV\n", 1574 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1575 return i915_forcewake_domains(m, NULL); 1576 } 1577 1578 static int i915_drpc_info(struct seq_file *m, void *unused) 1579 { 1580 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1581 int err; 1582 1583 intel_runtime_pm_get(dev_priv); 1584 1585 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1586 err = vlv_drpc_info(m); 1587 else if (INTEL_GEN(dev_priv) >= 6) 1588 err = gen6_drpc_info(m); 1589 else 1590 err = ironlake_drpc_info(m); 1591 1592 intel_runtime_pm_put(dev_priv); 1593 1594 return err; 1595 } 1596 1597 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) 1598 { 1599 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1600 1601 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 1602 dev_priv->fb_tracking.busy_bits); 1603 1604 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 1605 dev_priv->fb_tracking.flip_bits); 1606 1607 return 0; 1608 } 1609 1610 static int i915_fbc_status(struct seq_file *m, void *unused) 1611 { 1612 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1613 1614 if (!HAS_FBC(dev_priv)) { 1615 seq_puts(m, "FBC unsupported on this chipset\n"); 1616 return 0; 1617 } 1618 1619 intel_runtime_pm_get(dev_priv); 1620 mutex_lock(&dev_priv->fbc.lock); 1621 1622 if (intel_fbc_is_active(dev_priv)) 1623 seq_puts(m, "FBC enabled\n"); 1624 else 1625 seq_printf(m, "FBC disabled: %s\n", 1626 dev_priv->fbc.no_fbc_reason); 1627 1628 if (intel_fbc_is_active(dev_priv)) { 1629 u32 mask; 1630 1631 if (INTEL_GEN(dev_priv) >= 8) 1632 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK; 1633 else if (INTEL_GEN(dev_priv) >= 7) 1634 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; 1635 else if (INTEL_GEN(dev_priv) >= 5) 1636 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; 1637 else if (IS_G4X(dev_priv)) 1638 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK; 1639 else 1640 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING | 1641 FBC_STAT_COMPRESSED); 1642 1643 seq_printf(m, "Compressing: %s\n", yesno(mask)); 1644 } 1645 1646 mutex_unlock(&dev_priv->fbc.lock); 1647 intel_runtime_pm_put(dev_priv); 1648 1649 return 0; 1650 } 1651 1652 static int i915_fbc_false_color_get(void *data, u64 *val) 1653 { 1654 struct drm_i915_private *dev_priv = data; 1655 1656 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 1657 return -ENODEV; 1658 1659 *val = dev_priv->fbc.false_color; 1660 1661 return 0; 1662 } 1663 1664 static int i915_fbc_false_color_set(void *data, u64 val) 1665 { 1666 struct drm_i915_private *dev_priv = data; 1667 u32 reg; 1668 1669 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 1670 return -ENODEV; 1671 1672 mutex_lock(&dev_priv->fbc.lock); 1673 1674 reg = I915_READ(ILK_DPFC_CONTROL); 1675 dev_priv->fbc.false_color = val; 1676 1677 I915_WRITE(ILK_DPFC_CONTROL, val ? 1678 (reg | FBC_CTL_FALSE_COLOR) : 1679 (reg & ~FBC_CTL_FALSE_COLOR)); 1680 1681 mutex_unlock(&dev_priv->fbc.lock); 1682 return 0; 1683 } 1684 1685 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, 1686 i915_fbc_false_color_get, i915_fbc_false_color_set, 1687 "%llu\n"); 1688 1689 static int i915_ips_status(struct seq_file *m, void *unused) 1690 { 1691 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1692 1693 if (!HAS_IPS(dev_priv)) { 1694 seq_puts(m, "not supported\n"); 1695 return 0; 1696 } 1697 1698 intel_runtime_pm_get(dev_priv); 1699 1700 seq_printf(m, "Enabled by kernel parameter: %s\n", 1701 yesno(i915.enable_ips)); 1702 1703 if (INTEL_GEN(dev_priv) >= 8) { 1704 seq_puts(m, "Currently: unknown\n"); 1705 } else { 1706 if (I915_READ(IPS_CTL) & IPS_ENABLE) 1707 seq_puts(m, "Currently: enabled\n"); 1708 else 1709 seq_puts(m, "Currently: disabled\n"); 1710 } 1711 1712 intel_runtime_pm_put(dev_priv); 1713 1714 return 0; 1715 } 1716 1717 static int i915_sr_status(struct seq_file *m, void *unused) 1718 { 1719 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1720 bool sr_enabled = false; 1721 1722 intel_runtime_pm_get(dev_priv); 1723 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 1724 1725 if (INTEL_GEN(dev_priv) >= 9) 1726 /* no global SR status; inspect per-plane WM */; 1727 else if (HAS_PCH_SPLIT(dev_priv)) 1728 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1729 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) || 1730 IS_I945G(dev_priv) || IS_I945GM(dev_priv)) 1731 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1732 else if (IS_I915GM(dev_priv)) 1733 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1734 else if (IS_PINEVIEW(dev_priv)) 1735 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1736 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1737 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 1738 1739 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 1740 intel_runtime_pm_put(dev_priv); 1741 1742 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); 1743 1744 return 0; 1745 } 1746 1747 static int i915_emon_status(struct seq_file *m, void *unused) 1748 { 1749 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1750 struct drm_device *dev = &dev_priv->drm; 1751 unsigned long temp, chipset, gfx; 1752 int ret; 1753 1754 if (!IS_GEN5(dev_priv)) 1755 return -ENODEV; 1756 1757 ret = mutex_lock_interruptible(&dev->struct_mutex); 1758 if (ret) 1759 return ret; 1760 1761 temp = i915_mch_val(dev_priv); 1762 chipset = i915_chipset_val(dev_priv); 1763 gfx = i915_gfx_val(dev_priv); 1764 mutex_unlock(&dev->struct_mutex); 1765 1766 seq_printf(m, "GMCH temp: %ld\n", temp); 1767 seq_printf(m, "Chipset power: %ld\n", chipset); 1768 seq_printf(m, "GFX power: %ld\n", gfx); 1769 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1770 1771 return 0; 1772 } 1773 1774 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1775 { 1776 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1777 int ret = 0; 1778 int gpu_freq, ia_freq; 1779 unsigned int max_gpu_freq, min_gpu_freq; 1780 1781 if (!HAS_LLC(dev_priv)) { 1782 seq_puts(m, "unsupported on this chipset\n"); 1783 return 0; 1784 } 1785 1786 intel_runtime_pm_get(dev_priv); 1787 1788 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 1789 if (ret) 1790 goto out; 1791 1792 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { 1793 /* Convert GT frequency to 50 HZ units */ 1794 min_gpu_freq = 1795 dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER; 1796 max_gpu_freq = 1797 dev_priv->rps.max_freq_softlimit / GEN9_FREQ_SCALER; 1798 } else { 1799 min_gpu_freq = dev_priv->rps.min_freq_softlimit; 1800 max_gpu_freq = dev_priv->rps.max_freq_softlimit; 1801 } 1802 1803 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1804 1805 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 1806 ia_freq = gpu_freq; 1807 sandybridge_pcode_read(dev_priv, 1808 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1809 &ia_freq); 1810 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1811 intel_gpu_freq(dev_priv, (gpu_freq * 1812 (IS_GEN9_BC(dev_priv) || 1813 IS_CANNONLAKE(dev_priv) ? 1814 GEN9_FREQ_SCALER : 1))), 1815 ((ia_freq >> 0) & 0xff) * 100, 1816 ((ia_freq >> 8) & 0xff) * 100); 1817 } 1818 1819 mutex_unlock(&dev_priv->rps.hw_lock); 1820 1821 out: 1822 intel_runtime_pm_put(dev_priv); 1823 return ret; 1824 } 1825 1826 static int i915_opregion(struct seq_file *m, void *unused) 1827 { 1828 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1829 struct drm_device *dev = &dev_priv->drm; 1830 struct intel_opregion *opregion = &dev_priv->opregion; 1831 int ret; 1832 1833 ret = mutex_lock_interruptible(&dev->struct_mutex); 1834 if (ret) 1835 goto out; 1836 1837 if (opregion->header) 1838 seq_write(m, opregion->header, OPREGION_SIZE); 1839 1840 mutex_unlock(&dev->struct_mutex); 1841 1842 out: 1843 return 0; 1844 } 1845 1846 static int i915_vbt(struct seq_file *m, void *unused) 1847 { 1848 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; 1849 1850 if (opregion->vbt) 1851 seq_write(m, opregion->vbt, opregion->vbt_size); 1852 1853 return 0; 1854 } 1855 1856 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1857 { 1858 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1859 struct drm_device *dev = &dev_priv->drm; 1860 struct intel_framebuffer *fbdev_fb = NULL; 1861 struct drm_framebuffer *drm_fb; 1862 int ret; 1863 1864 ret = mutex_lock_interruptible(&dev->struct_mutex); 1865 if (ret) 1866 return ret; 1867 1868 #ifdef CONFIG_DRM_FBDEV_EMULATION 1869 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) { 1870 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb); 1871 1872 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1873 fbdev_fb->base.width, 1874 fbdev_fb->base.height, 1875 fbdev_fb->base.format->depth, 1876 fbdev_fb->base.format->cpp[0] * 8, 1877 fbdev_fb->base.modifier, 1878 drm_framebuffer_read_refcount(&fbdev_fb->base)); 1879 describe_obj(m, fbdev_fb->obj); 1880 seq_putc(m, '\n'); 1881 } 1882 #endif 1883 1884 mutex_lock(&dev->mode_config.fb_lock); 1885 drm_for_each_fb(drm_fb, dev) { 1886 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 1887 if (fb == fbdev_fb) 1888 continue; 1889 1890 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1891 fb->base.width, 1892 fb->base.height, 1893 fb->base.format->depth, 1894 fb->base.format->cpp[0] * 8, 1895 fb->base.modifier, 1896 drm_framebuffer_read_refcount(&fb->base)); 1897 describe_obj(m, fb->obj); 1898 seq_putc(m, '\n'); 1899 } 1900 mutex_unlock(&dev->mode_config.fb_lock); 1901 mutex_unlock(&dev->struct_mutex); 1902 1903 return 0; 1904 } 1905 1906 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) 1907 { 1908 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)", 1909 ring->space, ring->head, ring->tail); 1910 } 1911 1912 static int i915_context_status(struct seq_file *m, void *unused) 1913 { 1914 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1915 struct drm_device *dev = &dev_priv->drm; 1916 struct intel_engine_cs *engine; 1917 struct i915_gem_context *ctx; 1918 enum intel_engine_id id; 1919 int ret; 1920 1921 ret = mutex_lock_interruptible(&dev->struct_mutex); 1922 if (ret) 1923 return ret; 1924 1925 list_for_each_entry(ctx, &dev_priv->contexts.list, link) { 1926 seq_printf(m, "HW context %u ", ctx->hw_id); 1927 if (ctx->pid) { 1928 struct task_struct *task; 1929 1930 task = get_pid_task(ctx->pid, PIDTYPE_PID); 1931 if (task) { 1932 seq_printf(m, "(%s [%d]) ", 1933 task->comm, task->pid); 1934 put_task_struct(task); 1935 } 1936 } else if (IS_ERR(ctx->file_priv)) { 1937 seq_puts(m, "(deleted) "); 1938 } else { 1939 seq_puts(m, "(kernel) "); 1940 } 1941 1942 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 1943 seq_putc(m, '\n'); 1944 1945 for_each_engine(engine, dev_priv, id) { 1946 struct intel_context *ce = &ctx->engine[engine->id]; 1947 1948 seq_printf(m, "%s: ", engine->name); 1949 seq_putc(m, ce->initialised ? 'I' : 'i'); 1950 if (ce->state) 1951 describe_obj(m, ce->state->obj); 1952 if (ce->ring) 1953 describe_ctx_ring(m, ce->ring); 1954 seq_putc(m, '\n'); 1955 } 1956 1957 seq_putc(m, '\n'); 1958 } 1959 1960 mutex_unlock(&dev->struct_mutex); 1961 1962 return 0; 1963 } 1964 1965 static void i915_dump_lrc_obj(struct seq_file *m, 1966 struct i915_gem_context *ctx, 1967 struct intel_engine_cs *engine) 1968 { 1969 struct i915_vma *vma = ctx->engine[engine->id].state; 1970 struct page *page; 1971 int j; 1972 1973 seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id); 1974 1975 if (!vma) { 1976 seq_puts(m, "\tFake context\n"); 1977 return; 1978 } 1979 1980 if (vma->flags & I915_VMA_GLOBAL_BIND) 1981 seq_printf(m, "\tBound in GGTT at 0x%08x\n", 1982 i915_ggtt_offset(vma)); 1983 1984 if (i915_gem_object_pin_pages(vma->obj)) { 1985 seq_puts(m, "\tFailed to get pages for context object\n\n"); 1986 return; 1987 } 1988 1989 page = i915_gem_object_get_page(vma->obj, LRC_STATE_PN); 1990 if (page) { 1991 u32 *reg_state = kmap_atomic(page); 1992 1993 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) { 1994 seq_printf(m, 1995 "\t[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n", 1996 j * 4, 1997 reg_state[j], reg_state[j + 1], 1998 reg_state[j + 2], reg_state[j + 3]); 1999 } 2000 kunmap_atomic(reg_state); 2001 } 2002 2003 i915_gem_object_unpin_pages(vma->obj); 2004 seq_putc(m, '\n'); 2005 } 2006 2007 static int i915_dump_lrc(struct seq_file *m, void *unused) 2008 { 2009 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2010 struct drm_device *dev = &dev_priv->drm; 2011 struct intel_engine_cs *engine; 2012 struct i915_gem_context *ctx; 2013 enum intel_engine_id id; 2014 int ret; 2015 2016 if (!i915.enable_execlists) { 2017 seq_printf(m, "Logical Ring Contexts are disabled\n"); 2018 return 0; 2019 } 2020 2021 ret = mutex_lock_interruptible(&dev->struct_mutex); 2022 if (ret) 2023 return ret; 2024 2025 list_for_each_entry(ctx, &dev_priv->contexts.list, link) 2026 for_each_engine(engine, dev_priv, id) 2027 i915_dump_lrc_obj(m, ctx, engine); 2028 2029 mutex_unlock(&dev->struct_mutex); 2030 2031 return 0; 2032 } 2033 2034 static const char *swizzle_string(unsigned swizzle) 2035 { 2036 switch (swizzle) { 2037 case I915_BIT_6_SWIZZLE_NONE: 2038 return "none"; 2039 case I915_BIT_6_SWIZZLE_9: 2040 return "bit9"; 2041 case I915_BIT_6_SWIZZLE_9_10: 2042 return "bit9/bit10"; 2043 case I915_BIT_6_SWIZZLE_9_11: 2044 return "bit9/bit11"; 2045 case I915_BIT_6_SWIZZLE_9_10_11: 2046 return "bit9/bit10/bit11"; 2047 case I915_BIT_6_SWIZZLE_9_17: 2048 return "bit9/bit17"; 2049 case I915_BIT_6_SWIZZLE_9_10_17: 2050 return "bit9/bit10/bit17"; 2051 case I915_BIT_6_SWIZZLE_UNKNOWN: 2052 return "unknown"; 2053 } 2054 2055 return "bug"; 2056 } 2057 2058 static int i915_swizzle_info(struct seq_file *m, void *data) 2059 { 2060 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2061 2062 intel_runtime_pm_get(dev_priv); 2063 2064 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 2065 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 2066 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 2067 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 2068 2069 if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) { 2070 seq_printf(m, "DDC = 0x%08x\n", 2071 I915_READ(DCC)); 2072 seq_printf(m, "DDC2 = 0x%08x\n", 2073 I915_READ(DCC2)); 2074 seq_printf(m, "C0DRB3 = 0x%04x\n", 2075 I915_READ16(C0DRB3)); 2076 seq_printf(m, "C1DRB3 = 0x%04x\n", 2077 I915_READ16(C1DRB3)); 2078 } else if (INTEL_GEN(dev_priv) >= 6) { 2079 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 2080 I915_READ(MAD_DIMM_C0)); 2081 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 2082 I915_READ(MAD_DIMM_C1)); 2083 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 2084 I915_READ(MAD_DIMM_C2)); 2085 seq_printf(m, "TILECTL = 0x%08x\n", 2086 I915_READ(TILECTL)); 2087 if (INTEL_GEN(dev_priv) >= 8) 2088 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 2089 I915_READ(GAMTARBMODE)); 2090 else 2091 seq_printf(m, "ARB_MODE = 0x%08x\n", 2092 I915_READ(ARB_MODE)); 2093 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 2094 I915_READ(DISP_ARB_CTL)); 2095 } 2096 2097 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 2098 seq_puts(m, "L-shaped memory detected\n"); 2099 2100 intel_runtime_pm_put(dev_priv); 2101 2102 return 0; 2103 } 2104 2105 static int per_file_ctx(int id, void *ptr, void *data) 2106 { 2107 struct i915_gem_context *ctx = ptr; 2108 struct seq_file *m = data; 2109 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2110 2111 if (!ppgtt) { 2112 seq_printf(m, " no ppgtt for context %d\n", 2113 ctx->user_handle); 2114 return 0; 2115 } 2116 2117 if (i915_gem_context_is_default(ctx)) 2118 seq_puts(m, " default context:\n"); 2119 else 2120 seq_printf(m, " context %d:\n", ctx->user_handle); 2121 ppgtt->debug_dump(ppgtt, m); 2122 2123 return 0; 2124 } 2125 2126 static void gen8_ppgtt_info(struct seq_file *m, 2127 struct drm_i915_private *dev_priv) 2128 { 2129 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2130 struct intel_engine_cs *engine; 2131 enum intel_engine_id id; 2132 int i; 2133 2134 if (!ppgtt) 2135 return; 2136 2137 for_each_engine(engine, dev_priv, id) { 2138 seq_printf(m, "%s\n", engine->name); 2139 for (i = 0; i < 4; i++) { 2140 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i)); 2141 pdp <<= 32; 2142 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i)); 2143 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2144 } 2145 } 2146 } 2147 2148 static void gen6_ppgtt_info(struct seq_file *m, 2149 struct drm_i915_private *dev_priv) 2150 { 2151 struct intel_engine_cs *engine; 2152 enum intel_engine_id id; 2153 2154 if (IS_GEN6(dev_priv)) 2155 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2156 2157 for_each_engine(engine, dev_priv, id) { 2158 seq_printf(m, "%s\n", engine->name); 2159 if (IS_GEN7(dev_priv)) 2160 seq_printf(m, "GFX_MODE: 0x%08x\n", 2161 I915_READ(RING_MODE_GEN7(engine))); 2162 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", 2163 I915_READ(RING_PP_DIR_BASE(engine))); 2164 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", 2165 I915_READ(RING_PP_DIR_BASE_READ(engine))); 2166 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", 2167 I915_READ(RING_PP_DIR_DCLV(engine))); 2168 } 2169 if (dev_priv->mm.aliasing_ppgtt) { 2170 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2171 2172 seq_puts(m, "aliasing PPGTT:\n"); 2173 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); 2174 2175 ppgtt->debug_dump(ppgtt, m); 2176 } 2177 2178 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2179 } 2180 2181 static int i915_ppgtt_info(struct seq_file *m, void *data) 2182 { 2183 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2184 struct drm_device *dev = &dev_priv->drm; 2185 struct drm_file *file; 2186 int ret; 2187 2188 mutex_lock(&dev->filelist_mutex); 2189 ret = mutex_lock_interruptible(&dev->struct_mutex); 2190 if (ret) 2191 goto out_unlock; 2192 2193 intel_runtime_pm_get(dev_priv); 2194 2195 if (INTEL_GEN(dev_priv) >= 8) 2196 gen8_ppgtt_info(m, dev_priv); 2197 else if (INTEL_GEN(dev_priv) >= 6) 2198 gen6_ppgtt_info(m, dev_priv); 2199 2200 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2201 struct drm_i915_file_private *file_priv = file->driver_priv; 2202 struct task_struct *task; 2203 2204 task = get_pid_task(file->pid, PIDTYPE_PID); 2205 if (!task) { 2206 ret = -ESRCH; 2207 goto out_rpm; 2208 } 2209 seq_printf(m, "\nproc: %s\n", task->comm); 2210 put_task_struct(task); 2211 idr_for_each(&file_priv->context_idr, per_file_ctx, 2212 (void *)(unsigned long)m); 2213 } 2214 2215 out_rpm: 2216 intel_runtime_pm_put(dev_priv); 2217 mutex_unlock(&dev->struct_mutex); 2218 out_unlock: 2219 mutex_unlock(&dev->filelist_mutex); 2220 return ret; 2221 } 2222 2223 static int count_irq_waiters(struct drm_i915_private *i915) 2224 { 2225 struct intel_engine_cs *engine; 2226 enum intel_engine_id id; 2227 int count = 0; 2228 2229 for_each_engine(engine, i915, id) 2230 count += intel_engine_has_waiter(engine); 2231 2232 return count; 2233 } 2234 2235 static const char *rps_power_to_str(unsigned int power) 2236 { 2237 static const char * const strings[] = { 2238 [LOW_POWER] = "low power", 2239 [BETWEEN] = "mixed", 2240 [HIGH_POWER] = "high power", 2241 }; 2242 2243 if (power >= ARRAY_SIZE(strings) || !strings[power]) 2244 return "unknown"; 2245 2246 return strings[power]; 2247 } 2248 2249 static int i915_rps_boost_info(struct seq_file *m, void *data) 2250 { 2251 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2252 struct drm_device *dev = &dev_priv->drm; 2253 struct drm_file *file; 2254 2255 seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled); 2256 seq_printf(m, "GPU busy? %s [%d requests]\n", 2257 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); 2258 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2259 seq_printf(m, "Boosts outstanding? %d\n", 2260 atomic_read(&dev_priv->rps.num_waiters)); 2261 seq_printf(m, "Frequency requested %d\n", 2262 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq)); 2263 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2264 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 2265 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit), 2266 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit), 2267 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); 2268 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", 2269 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), 2270 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 2271 intel_gpu_freq(dev_priv, dev_priv->rps.boost_freq)); 2272 2273 mutex_lock(&dev->filelist_mutex); 2274 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2275 struct drm_i915_file_private *file_priv = file->driver_priv; 2276 struct task_struct *task; 2277 2278 rcu_read_lock(); 2279 task = pid_task(file->pid, PIDTYPE_PID); 2280 seq_printf(m, "%s [%d]: %d boosts\n", 2281 task ? task->comm : "<unknown>", 2282 task ? task->pid : -1, 2283 atomic_read(&file_priv->rps.boosts)); 2284 rcu_read_unlock(); 2285 } 2286 seq_printf(m, "Kernel (anonymous) boosts: %d\n", 2287 atomic_read(&dev_priv->rps.boosts)); 2288 mutex_unlock(&dev->filelist_mutex); 2289 2290 if (INTEL_GEN(dev_priv) >= 6 && 2291 dev_priv->rps.enabled && 2292 dev_priv->gt.active_requests) { 2293 u32 rpup, rpupei; 2294 u32 rpdown, rpdownei; 2295 2296 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2297 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; 2298 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; 2299 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; 2300 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; 2301 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2302 2303 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", 2304 rps_power_to_str(dev_priv->rps.power)); 2305 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", 2306 rpup && rpupei ? 100 * rpup / rpupei : 0, 2307 dev_priv->rps.up_threshold); 2308 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", 2309 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0, 2310 dev_priv->rps.down_threshold); 2311 } else { 2312 seq_puts(m, "\nRPS Autotuning inactive\n"); 2313 } 2314 2315 return 0; 2316 } 2317 2318 static int i915_llc(struct seq_file *m, void *data) 2319 { 2320 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2321 const bool edram = INTEL_GEN(dev_priv) > 8; 2322 2323 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv))); 2324 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC", 2325 intel_uncore_edram_size(dev_priv)/1024/1024); 2326 2327 return 0; 2328 } 2329 2330 static int i915_huc_load_status_info(struct seq_file *m, void *data) 2331 { 2332 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2333 struct intel_uc_fw *huc_fw = &dev_priv->huc.fw; 2334 2335 if (!HAS_HUC_UCODE(dev_priv)) 2336 return 0; 2337 2338 seq_puts(m, "HuC firmware status:\n"); 2339 seq_printf(m, "\tpath: %s\n", huc_fw->path); 2340 seq_printf(m, "\tfetch: %s\n", 2341 intel_uc_fw_status_repr(huc_fw->fetch_status)); 2342 seq_printf(m, "\tload: %s\n", 2343 intel_uc_fw_status_repr(huc_fw->load_status)); 2344 seq_printf(m, "\tversion wanted: %d.%d\n", 2345 huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted); 2346 seq_printf(m, "\tversion found: %d.%d\n", 2347 huc_fw->major_ver_found, huc_fw->minor_ver_found); 2348 seq_printf(m, "\theader: offset is %d; size = %d\n", 2349 huc_fw->header_offset, huc_fw->header_size); 2350 seq_printf(m, "\tuCode: offset is %d; size = %d\n", 2351 huc_fw->ucode_offset, huc_fw->ucode_size); 2352 seq_printf(m, "\tRSA: offset is %d; size = %d\n", 2353 huc_fw->rsa_offset, huc_fw->rsa_size); 2354 2355 intel_runtime_pm_get(dev_priv); 2356 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); 2357 intel_runtime_pm_put(dev_priv); 2358 2359 return 0; 2360 } 2361 2362 static int i915_guc_load_status_info(struct seq_file *m, void *data) 2363 { 2364 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2365 struct intel_uc_fw *guc_fw = &dev_priv->guc.fw; 2366 u32 tmp, i; 2367 2368 if (!HAS_GUC_UCODE(dev_priv)) 2369 return 0; 2370 2371 seq_printf(m, "GuC firmware status:\n"); 2372 seq_printf(m, "\tpath: %s\n", 2373 guc_fw->path); 2374 seq_printf(m, "\tfetch: %s\n", 2375 intel_uc_fw_status_repr(guc_fw->fetch_status)); 2376 seq_printf(m, "\tload: %s\n", 2377 intel_uc_fw_status_repr(guc_fw->load_status)); 2378 seq_printf(m, "\tversion wanted: %d.%d\n", 2379 guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted); 2380 seq_printf(m, "\tversion found: %d.%d\n", 2381 guc_fw->major_ver_found, guc_fw->minor_ver_found); 2382 seq_printf(m, "\theader: offset is %d; size = %d\n", 2383 guc_fw->header_offset, guc_fw->header_size); 2384 seq_printf(m, "\tuCode: offset is %d; size = %d\n", 2385 guc_fw->ucode_offset, guc_fw->ucode_size); 2386 seq_printf(m, "\tRSA: offset is %d; size = %d\n", 2387 guc_fw->rsa_offset, guc_fw->rsa_size); 2388 2389 intel_runtime_pm_get(dev_priv); 2390 2391 tmp = I915_READ(GUC_STATUS); 2392 2393 seq_printf(m, "\nGuC status 0x%08x:\n", tmp); 2394 seq_printf(m, "\tBootrom status = 0x%x\n", 2395 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); 2396 seq_printf(m, "\tuKernel status = 0x%x\n", 2397 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); 2398 seq_printf(m, "\tMIA Core status = 0x%x\n", 2399 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); 2400 seq_puts(m, "\nScratch registers:\n"); 2401 for (i = 0; i < 16; i++) 2402 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); 2403 2404 intel_runtime_pm_put(dev_priv); 2405 2406 return 0; 2407 } 2408 2409 static void i915_guc_log_info(struct seq_file *m, 2410 struct drm_i915_private *dev_priv) 2411 { 2412 struct intel_guc *guc = &dev_priv->guc; 2413 2414 seq_puts(m, "\nGuC logging stats:\n"); 2415 2416 seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n", 2417 guc->log.flush_count[GUC_ISR_LOG_BUFFER], 2418 guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]); 2419 2420 seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n", 2421 guc->log.flush_count[GUC_DPC_LOG_BUFFER], 2422 guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]); 2423 2424 seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n", 2425 guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER], 2426 guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]); 2427 2428 seq_printf(m, "\tTotal flush interrupt count: %u\n", 2429 guc->log.flush_interrupt_count); 2430 2431 seq_printf(m, "\tCapture miss count: %u\n", 2432 guc->log.capture_miss_count); 2433 } 2434 2435 static void i915_guc_client_info(struct seq_file *m, 2436 struct drm_i915_private *dev_priv, 2437 struct i915_guc_client *client) 2438 { 2439 struct intel_engine_cs *engine; 2440 enum intel_engine_id id; 2441 uint64_t tot = 0; 2442 2443 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", 2444 client->priority, client->stage_id, client->proc_desc_offset); 2445 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n", 2446 client->doorbell_id, client->doorbell_offset, client->doorbell_cookie); 2447 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", 2448 client->wq_size, client->wq_offset, client->wq_tail); 2449 2450 seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space); 2451 2452 for_each_engine(engine, dev_priv, id) { 2453 u64 submissions = client->submissions[id]; 2454 tot += submissions; 2455 seq_printf(m, "\tSubmissions: %llu %s\n", 2456 submissions, engine->name); 2457 } 2458 seq_printf(m, "\tTotal: %llu\n", tot); 2459 } 2460 2461 static bool check_guc_submission(struct seq_file *m) 2462 { 2463 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2464 const struct intel_guc *guc = &dev_priv->guc; 2465 2466 if (!guc->execbuf_client) { 2467 seq_printf(m, "GuC submission %s\n", 2468 HAS_GUC_SCHED(dev_priv) ? 2469 "disabled" : 2470 "not supported"); 2471 return false; 2472 } 2473 2474 return true; 2475 } 2476 2477 static int i915_guc_info(struct seq_file *m, void *data) 2478 { 2479 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2480 const struct intel_guc *guc = &dev_priv->guc; 2481 2482 if (!check_guc_submission(m)) 2483 return 0; 2484 2485 seq_printf(m, "Doorbell map:\n"); 2486 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap); 2487 seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline); 2488 2489 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client); 2490 i915_guc_client_info(m, dev_priv, guc->execbuf_client); 2491 2492 i915_guc_log_info(m, dev_priv); 2493 2494 /* Add more as required ... */ 2495 2496 return 0; 2497 } 2498 2499 static int i915_guc_stage_pool(struct seq_file *m, void *data) 2500 { 2501 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2502 const struct intel_guc *guc = &dev_priv->guc; 2503 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr; 2504 struct i915_guc_client *client = guc->execbuf_client; 2505 unsigned int tmp; 2506 int index; 2507 2508 if (!check_guc_submission(m)) 2509 return 0; 2510 2511 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) { 2512 struct intel_engine_cs *engine; 2513 2514 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE)) 2515 continue; 2516 2517 seq_printf(m, "GuC stage descriptor %u:\n", index); 2518 seq_printf(m, "\tIndex: %u\n", desc->stage_id); 2519 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute); 2520 seq_printf(m, "\tPriority: %d\n", desc->priority); 2521 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id); 2522 seq_printf(m, "\tEngines used: 0x%x\n", 2523 desc->engines_used); 2524 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n", 2525 desc->db_trigger_phy, 2526 desc->db_trigger_cpu, 2527 desc->db_trigger_uk); 2528 seq_printf(m, "\tProcess descriptor: 0x%x\n", 2529 desc->process_desc); 2530 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n", 2531 desc->wq_addr, desc->wq_size); 2532 seq_putc(m, '\n'); 2533 2534 for_each_engine_masked(engine, dev_priv, client->engines, tmp) { 2535 u32 guc_engine_id = engine->guc_id; 2536 struct guc_execlist_context *lrc = 2537 &desc->lrc[guc_engine_id]; 2538 2539 seq_printf(m, "\t%s LRC:\n", engine->name); 2540 seq_printf(m, "\t\tContext desc: 0x%x\n", 2541 lrc->context_desc); 2542 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id); 2543 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca); 2544 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin); 2545 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end); 2546 seq_putc(m, '\n'); 2547 } 2548 } 2549 2550 return 0; 2551 } 2552 2553 static int i915_guc_log_dump(struct seq_file *m, void *data) 2554 { 2555 struct drm_info_node *node = m->private; 2556 struct drm_i915_private *dev_priv = node_to_i915(node); 2557 bool dump_load_err = !!node->info_ent->data; 2558 struct drm_i915_gem_object *obj = NULL; 2559 u32 *log; 2560 int i = 0; 2561 2562 if (dump_load_err) 2563 obj = dev_priv->guc.load_err_log; 2564 else if (dev_priv->guc.log.vma) 2565 obj = dev_priv->guc.log.vma->obj; 2566 2567 if (!obj) 2568 return 0; 2569 2570 log = i915_gem_object_pin_map(obj, I915_MAP_WC); 2571 if (IS_ERR(log)) { 2572 DRM_DEBUG("Failed to pin object\n"); 2573 seq_puts(m, "(log data unaccessible)\n"); 2574 return PTR_ERR(log); 2575 } 2576 2577 for (i = 0; i < obj->base.size / sizeof(u32); i += 4) 2578 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", 2579 *(log + i), *(log + i + 1), 2580 *(log + i + 2), *(log + i + 3)); 2581 2582 seq_putc(m, '\n'); 2583 2584 i915_gem_object_unpin_map(obj); 2585 2586 return 0; 2587 } 2588 2589 static int i915_guc_log_control_get(void *data, u64 *val) 2590 { 2591 struct drm_i915_private *dev_priv = data; 2592 2593 if (!dev_priv->guc.log.vma) 2594 return -EINVAL; 2595 2596 *val = i915.guc_log_level; 2597 2598 return 0; 2599 } 2600 2601 static int i915_guc_log_control_set(void *data, u64 val) 2602 { 2603 struct drm_i915_private *dev_priv = data; 2604 int ret; 2605 2606 if (!dev_priv->guc.log.vma) 2607 return -EINVAL; 2608 2609 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 2610 if (ret) 2611 return ret; 2612 2613 intel_runtime_pm_get(dev_priv); 2614 ret = i915_guc_log_control(dev_priv, val); 2615 intel_runtime_pm_put(dev_priv); 2616 2617 mutex_unlock(&dev_priv->drm.struct_mutex); 2618 return ret; 2619 } 2620 2621 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops, 2622 i915_guc_log_control_get, i915_guc_log_control_set, 2623 "%lld\n"); 2624 2625 static const char *psr2_live_status(u32 val) 2626 { 2627 static const char * const live_status[] = { 2628 "IDLE", 2629 "CAPTURE", 2630 "CAPTURE_FS", 2631 "SLEEP", 2632 "BUFON_FW", 2633 "ML_UP", 2634 "SU_STANDBY", 2635 "FAST_SLEEP", 2636 "DEEP_SLEEP", 2637 "BUF_ON", 2638 "TG_ON" 2639 }; 2640 2641 val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; 2642 if (val < ARRAY_SIZE(live_status)) 2643 return live_status[val]; 2644 2645 return "unknown"; 2646 } 2647 2648 static int i915_edp_psr_status(struct seq_file *m, void *data) 2649 { 2650 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2651 u32 psrperf = 0; 2652 u32 stat[3]; 2653 enum pipe pipe; 2654 bool enabled = false; 2655 2656 if (!HAS_PSR(dev_priv)) { 2657 seq_puts(m, "PSR not supported\n"); 2658 return 0; 2659 } 2660 2661 intel_runtime_pm_get(dev_priv); 2662 2663 mutex_lock(&dev_priv->psr.lock); 2664 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2665 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2666 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2667 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2668 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2669 dev_priv->psr.busy_frontbuffer_bits); 2670 seq_printf(m, "Re-enable work scheduled: %s\n", 2671 yesno(work_busy(&dev_priv->psr.work.work))); 2672 2673 if (HAS_DDI(dev_priv)) { 2674 if (dev_priv->psr.psr2_support) 2675 enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; 2676 else 2677 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2678 } else { 2679 for_each_pipe(dev_priv, pipe) { 2680 enum transcoder cpu_transcoder = 2681 intel_pipe_to_cpu_transcoder(dev_priv, pipe); 2682 enum intel_display_power_domain power_domain; 2683 2684 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 2685 if (!intel_display_power_get_if_enabled(dev_priv, 2686 power_domain)) 2687 continue; 2688 2689 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2690 VLV_EDP_PSR_CURR_STATE_MASK; 2691 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2692 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2693 enabled = true; 2694 2695 intel_display_power_put(dev_priv, power_domain); 2696 } 2697 } 2698 2699 seq_printf(m, "Main link in standby mode: %s\n", 2700 yesno(dev_priv->psr.link_standby)); 2701 2702 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); 2703 2704 if (!HAS_DDI(dev_priv)) 2705 for_each_pipe(dev_priv, pipe) { 2706 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2707 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2708 seq_printf(m, " pipe %c", pipe_name(pipe)); 2709 } 2710 seq_puts(m, "\n"); 2711 2712 /* 2713 * VLV/CHV PSR has no kind of performance counter 2714 * SKL+ Perf counter is reset to 0 everytime DC state is entered 2715 */ 2716 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2717 psrperf = I915_READ(EDP_PSR_PERF_CNT) & 2718 EDP_PSR_PERF_CNT_MASK; 2719 2720 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2721 } 2722 if (dev_priv->psr.psr2_support) { 2723 u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL); 2724 2725 seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n", 2726 psr2, psr2_live_status(psr2)); 2727 } 2728 mutex_unlock(&dev_priv->psr.lock); 2729 2730 intel_runtime_pm_put(dev_priv); 2731 return 0; 2732 } 2733 2734 static int i915_sink_crc(struct seq_file *m, void *data) 2735 { 2736 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2737 struct drm_device *dev = &dev_priv->drm; 2738 struct intel_connector *connector; 2739 struct drm_connector_list_iter conn_iter; 2740 struct intel_dp *intel_dp = NULL; 2741 int ret; 2742 u8 crc[6]; 2743 2744 drm_modeset_lock_all(dev); 2745 drm_connector_list_iter_begin(dev, &conn_iter); 2746 for_each_intel_connector_iter(connector, &conn_iter) { 2747 struct drm_crtc *crtc; 2748 2749 if (!connector->base.state->best_encoder) 2750 continue; 2751 2752 crtc = connector->base.state->crtc; 2753 if (!crtc->state->active) 2754 continue; 2755 2756 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 2757 continue; 2758 2759 intel_dp = enc_to_intel_dp(connector->base.state->best_encoder); 2760 2761 ret = intel_dp_sink_crc(intel_dp, crc); 2762 if (ret) 2763 goto out; 2764 2765 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2766 crc[0], crc[1], crc[2], 2767 crc[3], crc[4], crc[5]); 2768 goto out; 2769 } 2770 ret = -ENODEV; 2771 out: 2772 drm_connector_list_iter_end(&conn_iter); 2773 drm_modeset_unlock_all(dev); 2774 return ret; 2775 } 2776 2777 static int i915_energy_uJ(struct seq_file *m, void *data) 2778 { 2779 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2780 unsigned long long power; 2781 u32 units; 2782 2783 if (INTEL_GEN(dev_priv) < 6) 2784 return -ENODEV; 2785 2786 intel_runtime_pm_get(dev_priv); 2787 2788 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) { 2789 intel_runtime_pm_put(dev_priv); 2790 return -ENODEV; 2791 } 2792 2793 units = (power & 0x1f00) >> 8; 2794 power = I915_READ(MCH_SECP_NRG_STTS); 2795 power = (1000000 * power) >> units; /* convert to uJ */ 2796 2797 intel_runtime_pm_put(dev_priv); 2798 2799 seq_printf(m, "%llu", power); 2800 2801 return 0; 2802 } 2803 2804 static int i915_runtime_pm_status(struct seq_file *m, void *unused) 2805 { 2806 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2807 struct pci_dev *pdev = dev_priv->drm.pdev; 2808 2809 if (!HAS_RUNTIME_PM(dev_priv)) 2810 seq_puts(m, "Runtime power management not supported\n"); 2811 2812 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); 2813 seq_printf(m, "IRQs disabled: %s\n", 2814 yesno(!intel_irqs_enabled(dev_priv))); 2815 #ifdef CONFIG_PM 2816 seq_printf(m, "Usage count: %d\n", 2817 atomic_read(&dev_priv->drm.dev->power.usage_count)); 2818 #else 2819 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 2820 #endif 2821 seq_printf(m, "PCI device power state: %s [%d]\n", 2822 pci_power_name(pdev->current_state), 2823 pdev->current_state); 2824 2825 return 0; 2826 } 2827 2828 static int i915_power_domain_info(struct seq_file *m, void *unused) 2829 { 2830 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2831 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2832 int i; 2833 2834 mutex_lock(&power_domains->lock); 2835 2836 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2837 for (i = 0; i < power_domains->power_well_count; i++) { 2838 struct i915_power_well *power_well; 2839 enum intel_display_power_domain power_domain; 2840 2841 power_well = &power_domains->power_wells[i]; 2842 seq_printf(m, "%-25s %d\n", power_well->name, 2843 power_well->count); 2844 2845 for_each_power_domain(power_domain, power_well->domains) 2846 seq_printf(m, " %-23s %d\n", 2847 intel_display_power_domain_str(power_domain), 2848 power_domains->domain_use_count[power_domain]); 2849 } 2850 2851 mutex_unlock(&power_domains->lock); 2852 2853 return 0; 2854 } 2855 2856 static int i915_dmc_info(struct seq_file *m, void *unused) 2857 { 2858 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2859 struct intel_csr *csr; 2860 2861 if (!HAS_CSR(dev_priv)) { 2862 seq_puts(m, "not supported\n"); 2863 return 0; 2864 } 2865 2866 csr = &dev_priv->csr; 2867 2868 intel_runtime_pm_get(dev_priv); 2869 2870 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); 2871 seq_printf(m, "path: %s\n", csr->fw_path); 2872 2873 if (!csr->dmc_payload) 2874 goto out; 2875 2876 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 2877 CSR_VERSION_MINOR(csr->version)); 2878 2879 if (IS_KABYLAKE(dev_priv) || 2880 (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) { 2881 seq_printf(m, "DC3 -> DC5 count: %d\n", 2882 I915_READ(SKL_CSR_DC3_DC5_COUNT)); 2883 seq_printf(m, "DC5 -> DC6 count: %d\n", 2884 I915_READ(SKL_CSR_DC5_DC6_COUNT)); 2885 } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) { 2886 seq_printf(m, "DC3 -> DC5 count: %d\n", 2887 I915_READ(BXT_CSR_DC3_DC5_COUNT)); 2888 } 2889 2890 out: 2891 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); 2892 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); 2893 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); 2894 2895 intel_runtime_pm_put(dev_priv); 2896 2897 return 0; 2898 } 2899 2900 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2901 struct drm_display_mode *mode) 2902 { 2903 int i; 2904 2905 for (i = 0; i < tabs; i++) 2906 seq_putc(m, '\t'); 2907 2908 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2909 mode->base.id, mode->name, 2910 mode->vrefresh, mode->clock, 2911 mode->hdisplay, mode->hsync_start, 2912 mode->hsync_end, mode->htotal, 2913 mode->vdisplay, mode->vsync_start, 2914 mode->vsync_end, mode->vtotal, 2915 mode->type, mode->flags); 2916 } 2917 2918 static void intel_encoder_info(struct seq_file *m, 2919 struct intel_crtc *intel_crtc, 2920 struct intel_encoder *intel_encoder) 2921 { 2922 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2923 struct drm_device *dev = &dev_priv->drm; 2924 struct drm_crtc *crtc = &intel_crtc->base; 2925 struct intel_connector *intel_connector; 2926 struct drm_encoder *encoder; 2927 2928 encoder = &intel_encoder->base; 2929 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2930 encoder->base.id, encoder->name); 2931 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2932 struct drm_connector *connector = &intel_connector->base; 2933 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2934 connector->base.id, 2935 connector->name, 2936 drm_get_connector_status_name(connector->status)); 2937 if (connector->status == connector_status_connected) { 2938 struct drm_display_mode *mode = &crtc->mode; 2939 seq_printf(m, ", mode:\n"); 2940 intel_seq_print_mode(m, 2, mode); 2941 } else { 2942 seq_putc(m, '\n'); 2943 } 2944 } 2945 } 2946 2947 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2948 { 2949 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2950 struct drm_device *dev = &dev_priv->drm; 2951 struct drm_crtc *crtc = &intel_crtc->base; 2952 struct intel_encoder *intel_encoder; 2953 struct drm_plane_state *plane_state = crtc->primary->state; 2954 struct drm_framebuffer *fb = plane_state->fb; 2955 2956 if (fb) 2957 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2958 fb->base.id, plane_state->src_x >> 16, 2959 plane_state->src_y >> 16, fb->width, fb->height); 2960 else 2961 seq_puts(m, "\tprimary plane disabled\n"); 2962 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2963 intel_encoder_info(m, intel_crtc, intel_encoder); 2964 } 2965 2966 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2967 { 2968 struct drm_display_mode *mode = panel->fixed_mode; 2969 2970 seq_printf(m, "\tfixed mode:\n"); 2971 intel_seq_print_mode(m, 2, mode); 2972 } 2973 2974 static void intel_dp_info(struct seq_file *m, 2975 struct intel_connector *intel_connector) 2976 { 2977 struct intel_encoder *intel_encoder = intel_connector->encoder; 2978 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2979 2980 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2981 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 2982 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) 2983 intel_panel_info(m, &intel_connector->panel); 2984 2985 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, 2986 &intel_dp->aux); 2987 } 2988 2989 static void intel_dp_mst_info(struct seq_file *m, 2990 struct intel_connector *intel_connector) 2991 { 2992 struct intel_encoder *intel_encoder = intel_connector->encoder; 2993 struct intel_dp_mst_encoder *intel_mst = 2994 enc_to_mst(&intel_encoder->base); 2995 struct intel_digital_port *intel_dig_port = intel_mst->primary; 2996 struct intel_dp *intel_dp = &intel_dig_port->dp; 2997 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, 2998 intel_connector->port); 2999 3000 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 3001 } 3002 3003 static void intel_hdmi_info(struct seq_file *m, 3004 struct intel_connector *intel_connector) 3005 { 3006 struct intel_encoder *intel_encoder = intel_connector->encoder; 3007 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 3008 3009 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 3010 } 3011 3012 static void intel_lvds_info(struct seq_file *m, 3013 struct intel_connector *intel_connector) 3014 { 3015 intel_panel_info(m, &intel_connector->panel); 3016 } 3017 3018 static void intel_connector_info(struct seq_file *m, 3019 struct drm_connector *connector) 3020 { 3021 struct intel_connector *intel_connector = to_intel_connector(connector); 3022 struct intel_encoder *intel_encoder = intel_connector->encoder; 3023 struct drm_display_mode *mode; 3024 3025 seq_printf(m, "connector %d: type %s, status: %s\n", 3026 connector->base.id, connector->name, 3027 drm_get_connector_status_name(connector->status)); 3028 if (connector->status == connector_status_connected) { 3029 seq_printf(m, "\tname: %s\n", connector->display_info.name); 3030 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 3031 connector->display_info.width_mm, 3032 connector->display_info.height_mm); 3033 seq_printf(m, "\tsubpixel order: %s\n", 3034 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 3035 seq_printf(m, "\tCEA rev: %d\n", 3036 connector->display_info.cea_rev); 3037 } 3038 3039 if (!intel_encoder) 3040 return; 3041 3042 switch (connector->connector_type) { 3043 case DRM_MODE_CONNECTOR_DisplayPort: 3044 case DRM_MODE_CONNECTOR_eDP: 3045 if (intel_encoder->type == INTEL_OUTPUT_DP_MST) 3046 intel_dp_mst_info(m, intel_connector); 3047 else 3048 intel_dp_info(m, intel_connector); 3049 break; 3050 case DRM_MODE_CONNECTOR_LVDS: 3051 if (intel_encoder->type == INTEL_OUTPUT_LVDS) 3052 intel_lvds_info(m, intel_connector); 3053 break; 3054 case DRM_MODE_CONNECTOR_HDMIA: 3055 if (intel_encoder->type == INTEL_OUTPUT_HDMI || 3056 intel_encoder->type == INTEL_OUTPUT_UNKNOWN) 3057 intel_hdmi_info(m, intel_connector); 3058 break; 3059 default: 3060 break; 3061 } 3062 3063 seq_printf(m, "\tmodes:\n"); 3064 list_for_each_entry(mode, &connector->modes, head) 3065 intel_seq_print_mode(m, 2, mode); 3066 } 3067 3068 static const char *plane_type(enum drm_plane_type type) 3069 { 3070 switch (type) { 3071 case DRM_PLANE_TYPE_OVERLAY: 3072 return "OVL"; 3073 case DRM_PLANE_TYPE_PRIMARY: 3074 return "PRI"; 3075 case DRM_PLANE_TYPE_CURSOR: 3076 return "CUR"; 3077 /* 3078 * Deliberately omitting default: to generate compiler warnings 3079 * when a new drm_plane_type gets added. 3080 */ 3081 } 3082 3083 return "unknown"; 3084 } 3085 3086 static const char *plane_rotation(unsigned int rotation) 3087 { 3088 static char buf[48]; 3089 /* 3090 * According to doc only one DRM_MODE_ROTATE_ is allowed but this 3091 * will print them all to visualize if the values are misused 3092 */ 3093 snprintf(buf, sizeof(buf), 3094 "%s%s%s%s%s%s(0x%08x)", 3095 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "", 3096 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "", 3097 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "", 3098 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "", 3099 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "", 3100 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "", 3101 rotation); 3102 3103 return buf; 3104 } 3105 3106 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3107 { 3108 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3109 struct drm_device *dev = &dev_priv->drm; 3110 struct intel_plane *intel_plane; 3111 3112 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3113 struct drm_plane_state *state; 3114 struct drm_plane *plane = &intel_plane->base; 3115 struct drm_format_name_buf format_name; 3116 3117 if (!plane->state) { 3118 seq_puts(m, "plane->state is NULL!\n"); 3119 continue; 3120 } 3121 3122 state = plane->state; 3123 3124 if (state->fb) { 3125 drm_get_format_name(state->fb->format->format, 3126 &format_name); 3127 } else { 3128 sprintf(format_name.str, "N/A"); 3129 } 3130 3131 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", 3132 plane->base.id, 3133 plane_type(intel_plane->base.type), 3134 state->crtc_x, state->crtc_y, 3135 state->crtc_w, state->crtc_h, 3136 (state->src_x >> 16), 3137 ((state->src_x & 0xffff) * 15625) >> 10, 3138 (state->src_y >> 16), 3139 ((state->src_y & 0xffff) * 15625) >> 10, 3140 (state->src_w >> 16), 3141 ((state->src_w & 0xffff) * 15625) >> 10, 3142 (state->src_h >> 16), 3143 ((state->src_h & 0xffff) * 15625) >> 10, 3144 format_name.str, 3145 plane_rotation(state->rotation)); 3146 } 3147 } 3148 3149 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3150 { 3151 struct intel_crtc_state *pipe_config; 3152 int num_scalers = intel_crtc->num_scalers; 3153 int i; 3154 3155 pipe_config = to_intel_crtc_state(intel_crtc->base.state); 3156 3157 /* Not all platformas have a scaler */ 3158 if (num_scalers) { 3159 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 3160 num_scalers, 3161 pipe_config->scaler_state.scaler_users, 3162 pipe_config->scaler_state.scaler_id); 3163 3164 for (i = 0; i < num_scalers; i++) { 3165 struct intel_scaler *sc = 3166 &pipe_config->scaler_state.scalers[i]; 3167 3168 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 3169 i, yesno(sc->in_use), sc->mode); 3170 } 3171 seq_puts(m, "\n"); 3172 } else { 3173 seq_puts(m, "\tNo scalers available on this platform\n"); 3174 } 3175 } 3176 3177 static int i915_display_info(struct seq_file *m, void *unused) 3178 { 3179 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3180 struct drm_device *dev = &dev_priv->drm; 3181 struct intel_crtc *crtc; 3182 struct drm_connector *connector; 3183 struct drm_connector_list_iter conn_iter; 3184 3185 intel_runtime_pm_get(dev_priv); 3186 seq_printf(m, "CRTC info\n"); 3187 seq_printf(m, "---------\n"); 3188 for_each_intel_crtc(dev, crtc) { 3189 struct intel_crtc_state *pipe_config; 3190 3191 drm_modeset_lock(&crtc->base.mutex, NULL); 3192 pipe_config = to_intel_crtc_state(crtc->base.state); 3193 3194 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n", 3195 crtc->base.base.id, pipe_name(crtc->pipe), 3196 yesno(pipe_config->base.active), 3197 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 3198 yesno(pipe_config->dither), pipe_config->pipe_bpp); 3199 3200 if (pipe_config->base.active) { 3201 struct intel_plane *cursor = 3202 to_intel_plane(crtc->base.cursor); 3203 3204 intel_crtc_info(m, crtc); 3205 3206 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n", 3207 yesno(cursor->base.state->visible), 3208 cursor->base.state->crtc_x, 3209 cursor->base.state->crtc_y, 3210 cursor->base.state->crtc_w, 3211 cursor->base.state->crtc_h, 3212 cursor->cursor.base); 3213 intel_scaler_info(m, crtc); 3214 intel_plane_info(m, crtc); 3215 } 3216 3217 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 3218 yesno(!crtc->cpu_fifo_underrun_disabled), 3219 yesno(!crtc->pch_fifo_underrun_disabled)); 3220 drm_modeset_unlock(&crtc->base.mutex); 3221 } 3222 3223 seq_printf(m, "\n"); 3224 seq_printf(m, "Connector info\n"); 3225 seq_printf(m, "--------------\n"); 3226 mutex_lock(&dev->mode_config.mutex); 3227 drm_connector_list_iter_begin(dev, &conn_iter); 3228 drm_for_each_connector_iter(connector, &conn_iter) 3229 intel_connector_info(m, connector); 3230 drm_connector_list_iter_end(&conn_iter); 3231 mutex_unlock(&dev->mode_config.mutex); 3232 3233 intel_runtime_pm_put(dev_priv); 3234 3235 return 0; 3236 } 3237 3238 static int i915_engine_info(struct seq_file *m, void *unused) 3239 { 3240 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3241 struct i915_gpu_error *error = &dev_priv->gpu_error; 3242 struct intel_engine_cs *engine; 3243 enum intel_engine_id id; 3244 3245 intel_runtime_pm_get(dev_priv); 3246 3247 seq_printf(m, "GT awake? %s\n", 3248 yesno(dev_priv->gt.awake)); 3249 seq_printf(m, "Global active requests: %d\n", 3250 dev_priv->gt.active_requests); 3251 3252 for_each_engine(engine, dev_priv, id) { 3253 struct intel_breadcrumbs *b = &engine->breadcrumbs; 3254 struct drm_i915_gem_request *rq; 3255 struct rb_node *rb; 3256 u64 addr; 3257 3258 seq_printf(m, "%s\n", engine->name); 3259 seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n", 3260 intel_engine_get_seqno(engine), 3261 intel_engine_last_submit(engine), 3262 engine->hangcheck.seqno, 3263 jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp), 3264 engine->timeline->inflight_seqnos); 3265 seq_printf(m, "\tReset count: %d\n", 3266 i915_reset_engine_count(error, engine)); 3267 3268 rcu_read_lock(); 3269 3270 seq_printf(m, "\tRequests:\n"); 3271 3272 rq = list_first_entry(&engine->timeline->requests, 3273 struct drm_i915_gem_request, link); 3274 if (&rq->link != &engine->timeline->requests) 3275 print_request(m, rq, "\t\tfirst "); 3276 3277 rq = list_last_entry(&engine->timeline->requests, 3278 struct drm_i915_gem_request, link); 3279 if (&rq->link != &engine->timeline->requests) 3280 print_request(m, rq, "\t\tlast "); 3281 3282 rq = i915_gem_find_active_request(engine); 3283 if (rq) { 3284 print_request(m, rq, "\t\tactive "); 3285 seq_printf(m, 3286 "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n", 3287 rq->head, rq->postfix, rq->tail, 3288 rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, 3289 rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); 3290 } 3291 3292 seq_printf(m, "\tRING_START: 0x%08x [0x%08x]\n", 3293 I915_READ(RING_START(engine->mmio_base)), 3294 rq ? i915_ggtt_offset(rq->ring->vma) : 0); 3295 seq_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n", 3296 I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR, 3297 rq ? rq->ring->head : 0); 3298 seq_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n", 3299 I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR, 3300 rq ? rq->ring->tail : 0); 3301 seq_printf(m, "\tRING_CTL: 0x%08x [%s]\n", 3302 I915_READ(RING_CTL(engine->mmio_base)), 3303 I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : ""); 3304 3305 rcu_read_unlock(); 3306 3307 addr = intel_engine_get_active_head(engine); 3308 seq_printf(m, "\tACTHD: 0x%08x_%08x\n", 3309 upper_32_bits(addr), lower_32_bits(addr)); 3310 addr = intel_engine_get_last_batch_head(engine); 3311 seq_printf(m, "\tBBADDR: 0x%08x_%08x\n", 3312 upper_32_bits(addr), lower_32_bits(addr)); 3313 3314 if (i915.enable_execlists) { 3315 u32 ptr, read, write; 3316 unsigned int idx; 3317 3318 seq_printf(m, "\tExeclist status: 0x%08x %08x\n", 3319 I915_READ(RING_EXECLIST_STATUS_LO(engine)), 3320 I915_READ(RING_EXECLIST_STATUS_HI(engine))); 3321 3322 ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); 3323 read = GEN8_CSB_READ_PTR(ptr); 3324 write = GEN8_CSB_WRITE_PTR(ptr); 3325 seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n", 3326 read, write, 3327 yesno(test_bit(ENGINE_IRQ_EXECLIST, 3328 &engine->irq_posted))); 3329 if (read >= GEN8_CSB_ENTRIES) 3330 read = 0; 3331 if (write >= GEN8_CSB_ENTRIES) 3332 write = 0; 3333 if (read > write) 3334 write += GEN8_CSB_ENTRIES; 3335 while (read < write) { 3336 idx = ++read % GEN8_CSB_ENTRIES; 3337 seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n", 3338 idx, 3339 I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), 3340 I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx))); 3341 } 3342 3343 rcu_read_lock(); 3344 for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) { 3345 unsigned int count; 3346 3347 rq = port_unpack(&engine->execlist_port[idx], 3348 &count); 3349 if (rq) { 3350 seq_printf(m, "\t\tELSP[%d] count=%d, ", 3351 idx, count); 3352 print_request(m, rq, "rq: "); 3353 } else { 3354 seq_printf(m, "\t\tELSP[%d] idle\n", 3355 idx); 3356 } 3357 } 3358 rcu_read_unlock(); 3359 3360 spin_lock_irq(&engine->timeline->lock); 3361 for (rb = engine->execlist_first; rb; rb = rb_next(rb)){ 3362 struct i915_priolist *p = 3363 rb_entry(rb, typeof(*p), node); 3364 3365 list_for_each_entry(rq, &p->requests, 3366 priotree.link) 3367 print_request(m, rq, "\t\tQ "); 3368 } 3369 spin_unlock_irq(&engine->timeline->lock); 3370 } else if (INTEL_GEN(dev_priv) > 6) { 3371 seq_printf(m, "\tPP_DIR_BASE: 0x%08x\n", 3372 I915_READ(RING_PP_DIR_BASE(engine))); 3373 seq_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", 3374 I915_READ(RING_PP_DIR_BASE_READ(engine))); 3375 seq_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", 3376 I915_READ(RING_PP_DIR_DCLV(engine))); 3377 } 3378 3379 spin_lock_irq(&b->rb_lock); 3380 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { 3381 struct intel_wait *w = rb_entry(rb, typeof(*w), node); 3382 3383 seq_printf(m, "\t%s [%d] waiting for %x\n", 3384 w->tsk->comm, w->tsk->pid, w->seqno); 3385 } 3386 spin_unlock_irq(&b->rb_lock); 3387 3388 seq_puts(m, "\n"); 3389 } 3390 3391 intel_runtime_pm_put(dev_priv); 3392 3393 return 0; 3394 } 3395 3396 static int i915_semaphore_status(struct seq_file *m, void *unused) 3397 { 3398 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3399 struct drm_device *dev = &dev_priv->drm; 3400 struct intel_engine_cs *engine; 3401 int num_rings = INTEL_INFO(dev_priv)->num_rings; 3402 enum intel_engine_id id; 3403 int j, ret; 3404 3405 if (!i915.semaphores) { 3406 seq_puts(m, "Semaphores are disabled\n"); 3407 return 0; 3408 } 3409 3410 ret = mutex_lock_interruptible(&dev->struct_mutex); 3411 if (ret) 3412 return ret; 3413 intel_runtime_pm_get(dev_priv); 3414 3415 if (IS_BROADWELL(dev_priv)) { 3416 struct page *page; 3417 uint64_t *seqno; 3418 3419 page = i915_gem_object_get_page(dev_priv->semaphore->obj, 0); 3420 3421 seqno = (uint64_t *)kmap_atomic(page); 3422 for_each_engine(engine, dev_priv, id) { 3423 uint64_t offset; 3424 3425 seq_printf(m, "%s\n", engine->name); 3426 3427 seq_puts(m, " Last signal:"); 3428 for (j = 0; j < num_rings; j++) { 3429 offset = id * I915_NUM_ENGINES + j; 3430 seq_printf(m, "0x%08llx (0x%02llx) ", 3431 seqno[offset], offset * 8); 3432 } 3433 seq_putc(m, '\n'); 3434 3435 seq_puts(m, " Last wait: "); 3436 for (j = 0; j < num_rings; j++) { 3437 offset = id + (j * I915_NUM_ENGINES); 3438 seq_printf(m, "0x%08llx (0x%02llx) ", 3439 seqno[offset], offset * 8); 3440 } 3441 seq_putc(m, '\n'); 3442 3443 } 3444 kunmap_atomic(seqno); 3445 } else { 3446 seq_puts(m, " Last signal:"); 3447 for_each_engine(engine, dev_priv, id) 3448 for (j = 0; j < num_rings; j++) 3449 seq_printf(m, "0x%08x\n", 3450 I915_READ(engine->semaphore.mbox.signal[j])); 3451 seq_putc(m, '\n'); 3452 } 3453 3454 intel_runtime_pm_put(dev_priv); 3455 mutex_unlock(&dev->struct_mutex); 3456 return 0; 3457 } 3458 3459 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 3460 { 3461 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3462 struct drm_device *dev = &dev_priv->drm; 3463 int i; 3464 3465 drm_modeset_lock_all(dev); 3466 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3467 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 3468 3469 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 3470 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", 3471 pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); 3472 seq_printf(m, " tracked hardware state:\n"); 3473 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); 3474 seq_printf(m, " dpll_md: 0x%08x\n", 3475 pll->state.hw_state.dpll_md); 3476 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); 3477 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); 3478 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); 3479 } 3480 drm_modeset_unlock_all(dev); 3481 3482 return 0; 3483 } 3484 3485 static int i915_wa_registers(struct seq_file *m, void *unused) 3486 { 3487 int i; 3488 int ret; 3489 struct intel_engine_cs *engine; 3490 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3491 struct drm_device *dev = &dev_priv->drm; 3492 struct i915_workarounds *workarounds = &dev_priv->workarounds; 3493 enum intel_engine_id id; 3494 3495 ret = mutex_lock_interruptible(&dev->struct_mutex); 3496 if (ret) 3497 return ret; 3498 3499 intel_runtime_pm_get(dev_priv); 3500 3501 seq_printf(m, "Workarounds applied: %d\n", workarounds->count); 3502 for_each_engine(engine, dev_priv, id) 3503 seq_printf(m, "HW whitelist count for %s: %d\n", 3504 engine->name, workarounds->hw_whitelist_count[id]); 3505 for (i = 0; i < workarounds->count; ++i) { 3506 i915_reg_t addr; 3507 u32 mask, value, read; 3508 bool ok; 3509 3510 addr = workarounds->reg[i].addr; 3511 mask = workarounds->reg[i].mask; 3512 value = workarounds->reg[i].value; 3513 read = I915_READ(addr); 3514 ok = (value & mask) == (read & mask); 3515 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3516 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL"); 3517 } 3518 3519 intel_runtime_pm_put(dev_priv); 3520 mutex_unlock(&dev->struct_mutex); 3521 3522 return 0; 3523 } 3524 3525 static int i915_ddb_info(struct seq_file *m, void *unused) 3526 { 3527 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3528 struct drm_device *dev = &dev_priv->drm; 3529 struct skl_ddb_allocation *ddb; 3530 struct skl_ddb_entry *entry; 3531 enum pipe pipe; 3532 int plane; 3533 3534 if (INTEL_GEN(dev_priv) < 9) 3535 return 0; 3536 3537 drm_modeset_lock_all(dev); 3538 3539 ddb = &dev_priv->wm.skl_hw.ddb; 3540 3541 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 3542 3543 for_each_pipe(dev_priv, pipe) { 3544 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 3545 3546 for_each_universal_plane(dev_priv, pipe, plane) { 3547 entry = &ddb->plane[pipe][plane]; 3548 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 3549 entry->start, entry->end, 3550 skl_ddb_entry_size(entry)); 3551 } 3552 3553 entry = &ddb->plane[pipe][PLANE_CURSOR]; 3554 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3555 entry->end, skl_ddb_entry_size(entry)); 3556 } 3557 3558 drm_modeset_unlock_all(dev); 3559 3560 return 0; 3561 } 3562 3563 static void drrs_status_per_crtc(struct seq_file *m, 3564 struct drm_device *dev, 3565 struct intel_crtc *intel_crtc) 3566 { 3567 struct drm_i915_private *dev_priv = to_i915(dev); 3568 struct i915_drrs *drrs = &dev_priv->drrs; 3569 int vrefresh = 0; 3570 struct drm_connector *connector; 3571 struct drm_connector_list_iter conn_iter; 3572 3573 drm_connector_list_iter_begin(dev, &conn_iter); 3574 drm_for_each_connector_iter(connector, &conn_iter) { 3575 if (connector->state->crtc != &intel_crtc->base) 3576 continue; 3577 3578 seq_printf(m, "%s:\n", connector->name); 3579 } 3580 drm_connector_list_iter_end(&conn_iter); 3581 3582 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) 3583 seq_puts(m, "\tVBT: DRRS_type: Static"); 3584 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) 3585 seq_puts(m, "\tVBT: DRRS_type: Seamless"); 3586 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) 3587 seq_puts(m, "\tVBT: DRRS_type: None"); 3588 else 3589 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); 3590 3591 seq_puts(m, "\n\n"); 3592 3593 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { 3594 struct intel_panel *panel; 3595 3596 mutex_lock(&drrs->mutex); 3597 /* DRRS Supported */ 3598 seq_puts(m, "\tDRRS Supported: Yes\n"); 3599 3600 /* disable_drrs() will make drrs->dp NULL */ 3601 if (!drrs->dp) { 3602 seq_puts(m, "Idleness DRRS: Disabled"); 3603 mutex_unlock(&drrs->mutex); 3604 return; 3605 } 3606 3607 panel = &drrs->dp->attached_connector->panel; 3608 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", 3609 drrs->busy_frontbuffer_bits); 3610 3611 seq_puts(m, "\n\t\t"); 3612 if (drrs->refresh_rate_type == DRRS_HIGH_RR) { 3613 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); 3614 vrefresh = panel->fixed_mode->vrefresh; 3615 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { 3616 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); 3617 vrefresh = panel->downclock_mode->vrefresh; 3618 } else { 3619 seq_printf(m, "DRRS_State: Unknown(%d)\n", 3620 drrs->refresh_rate_type); 3621 mutex_unlock(&drrs->mutex); 3622 return; 3623 } 3624 seq_printf(m, "\t\tVrefresh: %d", vrefresh); 3625 3626 seq_puts(m, "\n\t\t"); 3627 mutex_unlock(&drrs->mutex); 3628 } else { 3629 /* DRRS not supported. Print the VBT parameter*/ 3630 seq_puts(m, "\tDRRS Supported : No"); 3631 } 3632 seq_puts(m, "\n"); 3633 } 3634 3635 static int i915_drrs_status(struct seq_file *m, void *unused) 3636 { 3637 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3638 struct drm_device *dev = &dev_priv->drm; 3639 struct intel_crtc *intel_crtc; 3640 int active_crtc_cnt = 0; 3641 3642 drm_modeset_lock_all(dev); 3643 for_each_intel_crtc(dev, intel_crtc) { 3644 if (intel_crtc->base.state->active) { 3645 active_crtc_cnt++; 3646 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 3647 3648 drrs_status_per_crtc(m, dev, intel_crtc); 3649 } 3650 } 3651 drm_modeset_unlock_all(dev); 3652 3653 if (!active_crtc_cnt) 3654 seq_puts(m, "No active crtc found\n"); 3655 3656 return 0; 3657 } 3658 3659 static int i915_dp_mst_info(struct seq_file *m, void *unused) 3660 { 3661 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3662 struct drm_device *dev = &dev_priv->drm; 3663 struct intel_encoder *intel_encoder; 3664 struct intel_digital_port *intel_dig_port; 3665 struct drm_connector *connector; 3666 struct drm_connector_list_iter conn_iter; 3667 3668 drm_connector_list_iter_begin(dev, &conn_iter); 3669 drm_for_each_connector_iter(connector, &conn_iter) { 3670 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 3671 continue; 3672 3673 intel_encoder = intel_attached_encoder(connector); 3674 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) 3675 continue; 3676 3677 intel_dig_port = enc_to_dig_port(&intel_encoder->base); 3678 if (!intel_dig_port->dp.can_mst) 3679 continue; 3680 3681 seq_printf(m, "MST Source Port %c\n", 3682 port_name(intel_dig_port->port)); 3683 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 3684 } 3685 drm_connector_list_iter_end(&conn_iter); 3686 3687 return 0; 3688 } 3689 3690 static ssize_t i915_displayport_test_active_write(struct file *file, 3691 const char __user *ubuf, 3692 size_t len, loff_t *offp) 3693 { 3694 char *input_buffer; 3695 int status = 0; 3696 struct drm_device *dev; 3697 struct drm_connector *connector; 3698 struct drm_connector_list_iter conn_iter; 3699 struct intel_dp *intel_dp; 3700 int val = 0; 3701 3702 dev = ((struct seq_file *)file->private_data)->private; 3703 3704 if (len == 0) 3705 return 0; 3706 3707 input_buffer = memdup_user_nul(ubuf, len); 3708 if (IS_ERR(input_buffer)) 3709 return PTR_ERR(input_buffer); 3710 3711 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); 3712 3713 drm_connector_list_iter_begin(dev, &conn_iter); 3714 drm_for_each_connector_iter(connector, &conn_iter) { 3715 struct intel_encoder *encoder; 3716 3717 if (connector->connector_type != 3718 DRM_MODE_CONNECTOR_DisplayPort) 3719 continue; 3720 3721 encoder = to_intel_encoder(connector->encoder); 3722 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3723 continue; 3724 3725 if (encoder && connector->status == connector_status_connected) { 3726 intel_dp = enc_to_intel_dp(&encoder->base); 3727 status = kstrtoint(input_buffer, 10, &val); 3728 if (status < 0) 3729 break; 3730 DRM_DEBUG_DRIVER("Got %d for test active\n", val); 3731 /* To prevent erroneous activation of the compliance 3732 * testing code, only accept an actual value of 1 here 3733 */ 3734 if (val == 1) 3735 intel_dp->compliance.test_active = 1; 3736 else 3737 intel_dp->compliance.test_active = 0; 3738 } 3739 } 3740 drm_connector_list_iter_end(&conn_iter); 3741 kfree(input_buffer); 3742 if (status < 0) 3743 return status; 3744 3745 *offp += len; 3746 return len; 3747 } 3748 3749 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 3750 { 3751 struct drm_device *dev = m->private; 3752 struct drm_connector *connector; 3753 struct drm_connector_list_iter conn_iter; 3754 struct intel_dp *intel_dp; 3755 3756 drm_connector_list_iter_begin(dev, &conn_iter); 3757 drm_for_each_connector_iter(connector, &conn_iter) { 3758 struct intel_encoder *encoder; 3759 3760 if (connector->connector_type != 3761 DRM_MODE_CONNECTOR_DisplayPort) 3762 continue; 3763 3764 encoder = to_intel_encoder(connector->encoder); 3765 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3766 continue; 3767 3768 if (encoder && connector->status == connector_status_connected) { 3769 intel_dp = enc_to_intel_dp(&encoder->base); 3770 if (intel_dp->compliance.test_active) 3771 seq_puts(m, "1"); 3772 else 3773 seq_puts(m, "0"); 3774 } else 3775 seq_puts(m, "0"); 3776 } 3777 drm_connector_list_iter_end(&conn_iter); 3778 3779 return 0; 3780 } 3781 3782 static int i915_displayport_test_active_open(struct inode *inode, 3783 struct file *file) 3784 { 3785 struct drm_i915_private *dev_priv = inode->i_private; 3786 3787 return single_open(file, i915_displayport_test_active_show, 3788 &dev_priv->drm); 3789 } 3790 3791 static const struct file_operations i915_displayport_test_active_fops = { 3792 .owner = THIS_MODULE, 3793 .open = i915_displayport_test_active_open, 3794 .read = seq_read, 3795 .llseek = seq_lseek, 3796 .release = single_release, 3797 .write = i915_displayport_test_active_write 3798 }; 3799 3800 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 3801 { 3802 struct drm_device *dev = m->private; 3803 struct drm_connector *connector; 3804 struct drm_connector_list_iter conn_iter; 3805 struct intel_dp *intel_dp; 3806 3807 drm_connector_list_iter_begin(dev, &conn_iter); 3808 drm_for_each_connector_iter(connector, &conn_iter) { 3809 struct intel_encoder *encoder; 3810 3811 if (connector->connector_type != 3812 DRM_MODE_CONNECTOR_DisplayPort) 3813 continue; 3814 3815 encoder = to_intel_encoder(connector->encoder); 3816 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3817 continue; 3818 3819 if (encoder && connector->status == connector_status_connected) { 3820 intel_dp = enc_to_intel_dp(&encoder->base); 3821 if (intel_dp->compliance.test_type == 3822 DP_TEST_LINK_EDID_READ) 3823 seq_printf(m, "%lx", 3824 intel_dp->compliance.test_data.edid); 3825 else if (intel_dp->compliance.test_type == 3826 DP_TEST_LINK_VIDEO_PATTERN) { 3827 seq_printf(m, "hdisplay: %d\n", 3828 intel_dp->compliance.test_data.hdisplay); 3829 seq_printf(m, "vdisplay: %d\n", 3830 intel_dp->compliance.test_data.vdisplay); 3831 seq_printf(m, "bpc: %u\n", 3832 intel_dp->compliance.test_data.bpc); 3833 } 3834 } else 3835 seq_puts(m, "0"); 3836 } 3837 drm_connector_list_iter_end(&conn_iter); 3838 3839 return 0; 3840 } 3841 static int i915_displayport_test_data_open(struct inode *inode, 3842 struct file *file) 3843 { 3844 struct drm_i915_private *dev_priv = inode->i_private; 3845 3846 return single_open(file, i915_displayport_test_data_show, 3847 &dev_priv->drm); 3848 } 3849 3850 static const struct file_operations i915_displayport_test_data_fops = { 3851 .owner = THIS_MODULE, 3852 .open = i915_displayport_test_data_open, 3853 .read = seq_read, 3854 .llseek = seq_lseek, 3855 .release = single_release 3856 }; 3857 3858 static int i915_displayport_test_type_show(struct seq_file *m, void *data) 3859 { 3860 struct drm_device *dev = m->private; 3861 struct drm_connector *connector; 3862 struct drm_connector_list_iter conn_iter; 3863 struct intel_dp *intel_dp; 3864 3865 drm_connector_list_iter_begin(dev, &conn_iter); 3866 drm_for_each_connector_iter(connector, &conn_iter) { 3867 struct intel_encoder *encoder; 3868 3869 if (connector->connector_type != 3870 DRM_MODE_CONNECTOR_DisplayPort) 3871 continue; 3872 3873 encoder = to_intel_encoder(connector->encoder); 3874 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3875 continue; 3876 3877 if (encoder && connector->status == connector_status_connected) { 3878 intel_dp = enc_to_intel_dp(&encoder->base); 3879 seq_printf(m, "%02lx", intel_dp->compliance.test_type); 3880 } else 3881 seq_puts(m, "0"); 3882 } 3883 drm_connector_list_iter_end(&conn_iter); 3884 3885 return 0; 3886 } 3887 3888 static int i915_displayport_test_type_open(struct inode *inode, 3889 struct file *file) 3890 { 3891 struct drm_i915_private *dev_priv = inode->i_private; 3892 3893 return single_open(file, i915_displayport_test_type_show, 3894 &dev_priv->drm); 3895 } 3896 3897 static const struct file_operations i915_displayport_test_type_fops = { 3898 .owner = THIS_MODULE, 3899 .open = i915_displayport_test_type_open, 3900 .read = seq_read, 3901 .llseek = seq_lseek, 3902 .release = single_release 3903 }; 3904 3905 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) 3906 { 3907 struct drm_i915_private *dev_priv = m->private; 3908 struct drm_device *dev = &dev_priv->drm; 3909 int level; 3910 int num_levels; 3911 3912 if (IS_CHERRYVIEW(dev_priv)) 3913 num_levels = 3; 3914 else if (IS_VALLEYVIEW(dev_priv)) 3915 num_levels = 1; 3916 else if (IS_G4X(dev_priv)) 3917 num_levels = 3; 3918 else 3919 num_levels = ilk_wm_max_level(dev_priv) + 1; 3920 3921 drm_modeset_lock_all(dev); 3922 3923 for (level = 0; level < num_levels; level++) { 3924 unsigned int latency = wm[level]; 3925 3926 /* 3927 * - WM1+ latency values in 0.5us units 3928 * - latencies are in us on gen9/vlv/chv 3929 */ 3930 if (INTEL_GEN(dev_priv) >= 9 || 3931 IS_VALLEYVIEW(dev_priv) || 3932 IS_CHERRYVIEW(dev_priv) || 3933 IS_G4X(dev_priv)) 3934 latency *= 10; 3935 else if (level > 0) 3936 latency *= 5; 3937 3938 seq_printf(m, "WM%d %u (%u.%u usec)\n", 3939 level, wm[level], latency / 10, latency % 10); 3940 } 3941 3942 drm_modeset_unlock_all(dev); 3943 } 3944 3945 static int pri_wm_latency_show(struct seq_file *m, void *data) 3946 { 3947 struct drm_i915_private *dev_priv = m->private; 3948 const uint16_t *latencies; 3949 3950 if (INTEL_GEN(dev_priv) >= 9) 3951 latencies = dev_priv->wm.skl_latency; 3952 else 3953 latencies = dev_priv->wm.pri_latency; 3954 3955 wm_latency_show(m, latencies); 3956 3957 return 0; 3958 } 3959 3960 static int spr_wm_latency_show(struct seq_file *m, void *data) 3961 { 3962 struct drm_i915_private *dev_priv = m->private; 3963 const uint16_t *latencies; 3964 3965 if (INTEL_GEN(dev_priv) >= 9) 3966 latencies = dev_priv->wm.skl_latency; 3967 else 3968 latencies = dev_priv->wm.spr_latency; 3969 3970 wm_latency_show(m, latencies); 3971 3972 return 0; 3973 } 3974 3975 static int cur_wm_latency_show(struct seq_file *m, void *data) 3976 { 3977 struct drm_i915_private *dev_priv = m->private; 3978 const uint16_t *latencies; 3979 3980 if (INTEL_GEN(dev_priv) >= 9) 3981 latencies = dev_priv->wm.skl_latency; 3982 else 3983 latencies = dev_priv->wm.cur_latency; 3984 3985 wm_latency_show(m, latencies); 3986 3987 return 0; 3988 } 3989 3990 static int pri_wm_latency_open(struct inode *inode, struct file *file) 3991 { 3992 struct drm_i915_private *dev_priv = inode->i_private; 3993 3994 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 3995 return -ENODEV; 3996 3997 return single_open(file, pri_wm_latency_show, dev_priv); 3998 } 3999 4000 static int spr_wm_latency_open(struct inode *inode, struct file *file) 4001 { 4002 struct drm_i915_private *dev_priv = inode->i_private; 4003 4004 if (HAS_GMCH_DISPLAY(dev_priv)) 4005 return -ENODEV; 4006 4007 return single_open(file, spr_wm_latency_show, dev_priv); 4008 } 4009 4010 static int cur_wm_latency_open(struct inode *inode, struct file *file) 4011 { 4012 struct drm_i915_private *dev_priv = inode->i_private; 4013 4014 if (HAS_GMCH_DISPLAY(dev_priv)) 4015 return -ENODEV; 4016 4017 return single_open(file, cur_wm_latency_show, dev_priv); 4018 } 4019 4020 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 4021 size_t len, loff_t *offp, uint16_t wm[8]) 4022 { 4023 struct seq_file *m = file->private_data; 4024 struct drm_i915_private *dev_priv = m->private; 4025 struct drm_device *dev = &dev_priv->drm; 4026 uint16_t new[8] = { 0 }; 4027 int num_levels; 4028 int level; 4029 int ret; 4030 char tmp[32]; 4031 4032 if (IS_CHERRYVIEW(dev_priv)) 4033 num_levels = 3; 4034 else if (IS_VALLEYVIEW(dev_priv)) 4035 num_levels = 1; 4036 else if (IS_G4X(dev_priv)) 4037 num_levels = 3; 4038 else 4039 num_levels = ilk_wm_max_level(dev_priv) + 1; 4040 4041 if (len >= sizeof(tmp)) 4042 return -EINVAL; 4043 4044 if (copy_from_user(tmp, ubuf, len)) 4045 return -EFAULT; 4046 4047 tmp[len] = '\0'; 4048 4049 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 4050 &new[0], &new[1], &new[2], &new[3], 4051 &new[4], &new[5], &new[6], &new[7]); 4052 if (ret != num_levels) 4053 return -EINVAL; 4054 4055 drm_modeset_lock_all(dev); 4056 4057 for (level = 0; level < num_levels; level++) 4058 wm[level] = new[level]; 4059 4060 drm_modeset_unlock_all(dev); 4061 4062 return len; 4063 } 4064 4065 4066 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 4067 size_t len, loff_t *offp) 4068 { 4069 struct seq_file *m = file->private_data; 4070 struct drm_i915_private *dev_priv = m->private; 4071 uint16_t *latencies; 4072 4073 if (INTEL_GEN(dev_priv) >= 9) 4074 latencies = dev_priv->wm.skl_latency; 4075 else 4076 latencies = dev_priv->wm.pri_latency; 4077 4078 return wm_latency_write(file, ubuf, len, offp, latencies); 4079 } 4080 4081 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 4082 size_t len, loff_t *offp) 4083 { 4084 struct seq_file *m = file->private_data; 4085 struct drm_i915_private *dev_priv = m->private; 4086 uint16_t *latencies; 4087 4088 if (INTEL_GEN(dev_priv) >= 9) 4089 latencies = dev_priv->wm.skl_latency; 4090 else 4091 latencies = dev_priv->wm.spr_latency; 4092 4093 return wm_latency_write(file, ubuf, len, offp, latencies); 4094 } 4095 4096 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 4097 size_t len, loff_t *offp) 4098 { 4099 struct seq_file *m = file->private_data; 4100 struct drm_i915_private *dev_priv = m->private; 4101 uint16_t *latencies; 4102 4103 if (INTEL_GEN(dev_priv) >= 9) 4104 latencies = dev_priv->wm.skl_latency; 4105 else 4106 latencies = dev_priv->wm.cur_latency; 4107 4108 return wm_latency_write(file, ubuf, len, offp, latencies); 4109 } 4110 4111 static const struct file_operations i915_pri_wm_latency_fops = { 4112 .owner = THIS_MODULE, 4113 .open = pri_wm_latency_open, 4114 .read = seq_read, 4115 .llseek = seq_lseek, 4116 .release = single_release, 4117 .write = pri_wm_latency_write 4118 }; 4119 4120 static const struct file_operations i915_spr_wm_latency_fops = { 4121 .owner = THIS_MODULE, 4122 .open = spr_wm_latency_open, 4123 .read = seq_read, 4124 .llseek = seq_lseek, 4125 .release = single_release, 4126 .write = spr_wm_latency_write 4127 }; 4128 4129 static const struct file_operations i915_cur_wm_latency_fops = { 4130 .owner = THIS_MODULE, 4131 .open = cur_wm_latency_open, 4132 .read = seq_read, 4133 .llseek = seq_lseek, 4134 .release = single_release, 4135 .write = cur_wm_latency_write 4136 }; 4137 4138 static int 4139 i915_wedged_get(void *data, u64 *val) 4140 { 4141 struct drm_i915_private *dev_priv = data; 4142 4143 *val = i915_terminally_wedged(&dev_priv->gpu_error); 4144 4145 return 0; 4146 } 4147 4148 static int 4149 i915_wedged_set(void *data, u64 val) 4150 { 4151 struct drm_i915_private *i915 = data; 4152 struct intel_engine_cs *engine; 4153 unsigned int tmp; 4154 4155 /* 4156 * There is no safeguard against this debugfs entry colliding 4157 * with the hangcheck calling same i915_handle_error() in 4158 * parallel, causing an explosion. For now we assume that the 4159 * test harness is responsible enough not to inject gpu hangs 4160 * while it is writing to 'i915_wedged' 4161 */ 4162 4163 if (i915_reset_backoff(&i915->gpu_error)) 4164 return -EAGAIN; 4165 4166 for_each_engine_masked(engine, i915, val, tmp) { 4167 engine->hangcheck.seqno = intel_engine_get_seqno(engine); 4168 engine->hangcheck.stalled = true; 4169 } 4170 4171 i915_handle_error(i915, val, "Manually setting wedged to %llu", val); 4172 4173 wait_on_bit(&i915->gpu_error.flags, 4174 I915_RESET_HANDOFF, 4175 TASK_UNINTERRUPTIBLE); 4176 4177 return 0; 4178 } 4179 4180 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 4181 i915_wedged_get, i915_wedged_set, 4182 "%llu\n"); 4183 4184 static int 4185 fault_irq_set(struct drm_i915_private *i915, 4186 unsigned long *irq, 4187 unsigned long val) 4188 { 4189 int err; 4190 4191 err = mutex_lock_interruptible(&i915->drm.struct_mutex); 4192 if (err) 4193 return err; 4194 4195 err = i915_gem_wait_for_idle(i915, 4196 I915_WAIT_LOCKED | 4197 I915_WAIT_INTERRUPTIBLE); 4198 if (err) 4199 goto err_unlock; 4200 4201 *irq = val; 4202 mutex_unlock(&i915->drm.struct_mutex); 4203 4204 /* Flush idle worker to disarm irq */ 4205 while (flush_delayed_work(&i915->gt.idle_work)) 4206 ; 4207 4208 return 0; 4209 4210 err_unlock: 4211 mutex_unlock(&i915->drm.struct_mutex); 4212 return err; 4213 } 4214 4215 static int 4216 i915_ring_missed_irq_get(void *data, u64 *val) 4217 { 4218 struct drm_i915_private *dev_priv = data; 4219 4220 *val = dev_priv->gpu_error.missed_irq_rings; 4221 return 0; 4222 } 4223 4224 static int 4225 i915_ring_missed_irq_set(void *data, u64 val) 4226 { 4227 struct drm_i915_private *i915 = data; 4228 4229 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val); 4230 } 4231 4232 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 4233 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 4234 "0x%08llx\n"); 4235 4236 static int 4237 i915_ring_test_irq_get(void *data, u64 *val) 4238 { 4239 struct drm_i915_private *dev_priv = data; 4240 4241 *val = dev_priv->gpu_error.test_irq_rings; 4242 4243 return 0; 4244 } 4245 4246 static int 4247 i915_ring_test_irq_set(void *data, u64 val) 4248 { 4249 struct drm_i915_private *i915 = data; 4250 4251 val &= INTEL_INFO(i915)->ring_mask; 4252 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 4253 4254 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val); 4255 } 4256 4257 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 4258 i915_ring_test_irq_get, i915_ring_test_irq_set, 4259 "0x%08llx\n"); 4260 4261 #define DROP_UNBOUND 0x1 4262 #define DROP_BOUND 0x2 4263 #define DROP_RETIRE 0x4 4264 #define DROP_ACTIVE 0x8 4265 #define DROP_FREED 0x10 4266 #define DROP_SHRINK_ALL 0x20 4267 #define DROP_ALL (DROP_UNBOUND | \ 4268 DROP_BOUND | \ 4269 DROP_RETIRE | \ 4270 DROP_ACTIVE | \ 4271 DROP_FREED | \ 4272 DROP_SHRINK_ALL) 4273 static int 4274 i915_drop_caches_get(void *data, u64 *val) 4275 { 4276 *val = DROP_ALL; 4277 4278 return 0; 4279 } 4280 4281 static int 4282 i915_drop_caches_set(void *data, u64 val) 4283 { 4284 struct drm_i915_private *dev_priv = data; 4285 struct drm_device *dev = &dev_priv->drm; 4286 int ret = 0; 4287 4288 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 4289 4290 /* No need to check and wait for gpu resets, only libdrm auto-restarts 4291 * on ioctls on -EAGAIN. */ 4292 if (val & (DROP_ACTIVE | DROP_RETIRE)) { 4293 ret = mutex_lock_interruptible(&dev->struct_mutex); 4294 if (ret) 4295 return ret; 4296 4297 if (val & DROP_ACTIVE) 4298 ret = i915_gem_wait_for_idle(dev_priv, 4299 I915_WAIT_INTERRUPTIBLE | 4300 I915_WAIT_LOCKED); 4301 4302 if (val & DROP_RETIRE) 4303 i915_gem_retire_requests(dev_priv); 4304 4305 mutex_unlock(&dev->struct_mutex); 4306 } 4307 4308 lockdep_set_current_reclaim_state(GFP_KERNEL); 4309 if (val & DROP_BOUND) 4310 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 4311 4312 if (val & DROP_UNBOUND) 4313 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND); 4314 4315 if (val & DROP_SHRINK_ALL) 4316 i915_gem_shrink_all(dev_priv); 4317 lockdep_clear_current_reclaim_state(); 4318 4319 if (val & DROP_FREED) { 4320 synchronize_rcu(); 4321 i915_gem_drain_freed_objects(dev_priv); 4322 } 4323 4324 return ret; 4325 } 4326 4327 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 4328 i915_drop_caches_get, i915_drop_caches_set, 4329 "0x%08llx\n"); 4330 4331 static int 4332 i915_max_freq_get(void *data, u64 *val) 4333 { 4334 struct drm_i915_private *dev_priv = data; 4335 4336 if (INTEL_GEN(dev_priv) < 6) 4337 return -ENODEV; 4338 4339 *val = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); 4340 return 0; 4341 } 4342 4343 static int 4344 i915_max_freq_set(void *data, u64 val) 4345 { 4346 struct drm_i915_private *dev_priv = data; 4347 u32 hw_max, hw_min; 4348 int ret; 4349 4350 if (INTEL_GEN(dev_priv) < 6) 4351 return -ENODEV; 4352 4353 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 4354 4355 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4356 if (ret) 4357 return ret; 4358 4359 /* 4360 * Turbo will still be enabled, but won't go above the set value. 4361 */ 4362 val = intel_freq_opcode(dev_priv, val); 4363 4364 hw_max = dev_priv->rps.max_freq; 4365 hw_min = dev_priv->rps.min_freq; 4366 4367 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { 4368 mutex_unlock(&dev_priv->rps.hw_lock); 4369 return -EINVAL; 4370 } 4371 4372 dev_priv->rps.max_freq_softlimit = val; 4373 4374 if (intel_set_rps(dev_priv, val)) 4375 DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); 4376 4377 mutex_unlock(&dev_priv->rps.hw_lock); 4378 4379 return 0; 4380 } 4381 4382 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 4383 i915_max_freq_get, i915_max_freq_set, 4384 "%llu\n"); 4385 4386 static int 4387 i915_min_freq_get(void *data, u64 *val) 4388 { 4389 struct drm_i915_private *dev_priv = data; 4390 4391 if (INTEL_GEN(dev_priv) < 6) 4392 return -ENODEV; 4393 4394 *val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); 4395 return 0; 4396 } 4397 4398 static int 4399 i915_min_freq_set(void *data, u64 val) 4400 { 4401 struct drm_i915_private *dev_priv = data; 4402 u32 hw_max, hw_min; 4403 int ret; 4404 4405 if (INTEL_GEN(dev_priv) < 6) 4406 return -ENODEV; 4407 4408 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 4409 4410 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); 4411 if (ret) 4412 return ret; 4413 4414 /* 4415 * Turbo will still be enabled, but won't go below the set value. 4416 */ 4417 val = intel_freq_opcode(dev_priv, val); 4418 4419 hw_max = dev_priv->rps.max_freq; 4420 hw_min = dev_priv->rps.min_freq; 4421 4422 if (val < hw_min || 4423 val > hw_max || val > dev_priv->rps.max_freq_softlimit) { 4424 mutex_unlock(&dev_priv->rps.hw_lock); 4425 return -EINVAL; 4426 } 4427 4428 dev_priv->rps.min_freq_softlimit = val; 4429 4430 if (intel_set_rps(dev_priv, val)) 4431 DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); 4432 4433 mutex_unlock(&dev_priv->rps.hw_lock); 4434 4435 return 0; 4436 } 4437 4438 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 4439 i915_min_freq_get, i915_min_freq_set, 4440 "%llu\n"); 4441 4442 static int 4443 i915_cache_sharing_get(void *data, u64 *val) 4444 { 4445 struct drm_i915_private *dev_priv = data; 4446 u32 snpcr; 4447 4448 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv))) 4449 return -ENODEV; 4450 4451 intel_runtime_pm_get(dev_priv); 4452 4453 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4454 4455 intel_runtime_pm_put(dev_priv); 4456 4457 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 4458 4459 return 0; 4460 } 4461 4462 static int 4463 i915_cache_sharing_set(void *data, u64 val) 4464 { 4465 struct drm_i915_private *dev_priv = data; 4466 u32 snpcr; 4467 4468 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv))) 4469 return -ENODEV; 4470 4471 if (val > 3) 4472 return -EINVAL; 4473 4474 intel_runtime_pm_get(dev_priv); 4475 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 4476 4477 /* Update the cache sharing policy here as well */ 4478 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4479 snpcr &= ~GEN6_MBC_SNPCR_MASK; 4480 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 4481 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 4482 4483 intel_runtime_pm_put(dev_priv); 4484 return 0; 4485 } 4486 4487 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 4488 i915_cache_sharing_get, i915_cache_sharing_set, 4489 "%llu\n"); 4490 4491 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv, 4492 struct sseu_dev_info *sseu) 4493 { 4494 int ss_max = 2; 4495 int ss; 4496 u32 sig1[ss_max], sig2[ss_max]; 4497 4498 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); 4499 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); 4500 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); 4501 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); 4502 4503 for (ss = 0; ss < ss_max; ss++) { 4504 unsigned int eu_cnt; 4505 4506 if (sig1[ss] & CHV_SS_PG_ENABLE) 4507 /* skip disabled subslice */ 4508 continue; 4509 4510 sseu->slice_mask = BIT(0); 4511 sseu->subslice_mask |= BIT(ss); 4512 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + 4513 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + 4514 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + 4515 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); 4516 sseu->eu_total += eu_cnt; 4517 sseu->eu_per_subslice = max_t(unsigned int, 4518 sseu->eu_per_subslice, eu_cnt); 4519 } 4520 } 4521 4522 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, 4523 struct sseu_dev_info *sseu) 4524 { 4525 int s_max = 3, ss_max = 4; 4526 int s, ss; 4527 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; 4528 4529 /* BXT has a single slice and at most 3 subslices. */ 4530 if (IS_GEN9_LP(dev_priv)) { 4531 s_max = 1; 4532 ss_max = 3; 4533 } 4534 4535 for (s = 0; s < s_max; s++) { 4536 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); 4537 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s)); 4538 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s)); 4539 } 4540 4541 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | 4542 GEN9_PGCTL_SSA_EU19_ACK | 4543 GEN9_PGCTL_SSA_EU210_ACK | 4544 GEN9_PGCTL_SSA_EU311_ACK; 4545 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | 4546 GEN9_PGCTL_SSB_EU19_ACK | 4547 GEN9_PGCTL_SSB_EU210_ACK | 4548 GEN9_PGCTL_SSB_EU311_ACK; 4549 4550 for (s = 0; s < s_max; s++) { 4551 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 4552 /* skip disabled slice */ 4553 continue; 4554 4555 sseu->slice_mask |= BIT(s); 4556 4557 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) 4558 sseu->subslice_mask = 4559 INTEL_INFO(dev_priv)->sseu.subslice_mask; 4560 4561 for (ss = 0; ss < ss_max; ss++) { 4562 unsigned int eu_cnt; 4563 4564 if (IS_GEN9_LP(dev_priv)) { 4565 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) 4566 /* skip disabled subslice */ 4567 continue; 4568 4569 sseu->subslice_mask |= BIT(ss); 4570 } 4571 4572 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & 4573 eu_mask[ss%2]); 4574 sseu->eu_total += eu_cnt; 4575 sseu->eu_per_subslice = max_t(unsigned int, 4576 sseu->eu_per_subslice, 4577 eu_cnt); 4578 } 4579 } 4580 } 4581 4582 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, 4583 struct sseu_dev_info *sseu) 4584 { 4585 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); 4586 int s; 4587 4588 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; 4589 4590 if (sseu->slice_mask) { 4591 sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask; 4592 sseu->eu_per_subslice = 4593 INTEL_INFO(dev_priv)->sseu.eu_per_subslice; 4594 sseu->eu_total = sseu->eu_per_subslice * 4595 sseu_subslice_total(sseu); 4596 4597 /* subtract fused off EU(s) from enabled slice(s) */ 4598 for (s = 0; s < fls(sseu->slice_mask); s++) { 4599 u8 subslice_7eu = 4600 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s]; 4601 4602 sseu->eu_total -= hweight8(subslice_7eu); 4603 } 4604 } 4605 } 4606 4607 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, 4608 const struct sseu_dev_info *sseu) 4609 { 4610 struct drm_i915_private *dev_priv = node_to_i915(m->private); 4611 const char *type = is_available_info ? "Available" : "Enabled"; 4612 4613 seq_printf(m, " %s Slice Mask: %04x\n", type, 4614 sseu->slice_mask); 4615 seq_printf(m, " %s Slice Total: %u\n", type, 4616 hweight8(sseu->slice_mask)); 4617 seq_printf(m, " %s Subslice Total: %u\n", type, 4618 sseu_subslice_total(sseu)); 4619 seq_printf(m, " %s Subslice Mask: %04x\n", type, 4620 sseu->subslice_mask); 4621 seq_printf(m, " %s Subslice Per Slice: %u\n", type, 4622 hweight8(sseu->subslice_mask)); 4623 seq_printf(m, " %s EU Total: %u\n", type, 4624 sseu->eu_total); 4625 seq_printf(m, " %s EU Per Subslice: %u\n", type, 4626 sseu->eu_per_subslice); 4627 4628 if (!is_available_info) 4629 return; 4630 4631 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv))); 4632 if (HAS_POOLED_EU(dev_priv)) 4633 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool); 4634 4635 seq_printf(m, " Has Slice Power Gating: %s\n", 4636 yesno(sseu->has_slice_pg)); 4637 seq_printf(m, " Has Subslice Power Gating: %s\n", 4638 yesno(sseu->has_subslice_pg)); 4639 seq_printf(m, " Has EU Power Gating: %s\n", 4640 yesno(sseu->has_eu_pg)); 4641 } 4642 4643 static int i915_sseu_status(struct seq_file *m, void *unused) 4644 { 4645 struct drm_i915_private *dev_priv = node_to_i915(m->private); 4646 struct sseu_dev_info sseu; 4647 4648 if (INTEL_GEN(dev_priv) < 8) 4649 return -ENODEV; 4650 4651 seq_puts(m, "SSEU Device Info\n"); 4652 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu); 4653 4654 seq_puts(m, "SSEU Device Status\n"); 4655 memset(&sseu, 0, sizeof(sseu)); 4656 4657 intel_runtime_pm_get(dev_priv); 4658 4659 if (IS_CHERRYVIEW(dev_priv)) { 4660 cherryview_sseu_device_status(dev_priv, &sseu); 4661 } else if (IS_BROADWELL(dev_priv)) { 4662 broadwell_sseu_device_status(dev_priv, &sseu); 4663 } else if (INTEL_GEN(dev_priv) >= 9) { 4664 gen9_sseu_device_status(dev_priv, &sseu); 4665 } 4666 4667 intel_runtime_pm_put(dev_priv); 4668 4669 i915_print_sseu_info(m, false, &sseu); 4670 4671 return 0; 4672 } 4673 4674 static int i915_forcewake_open(struct inode *inode, struct file *file) 4675 { 4676 struct drm_i915_private *dev_priv = inode->i_private; 4677 4678 if (INTEL_GEN(dev_priv) < 6) 4679 return 0; 4680 4681 intel_runtime_pm_get(dev_priv); 4682 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4683 4684 return 0; 4685 } 4686 4687 static int i915_forcewake_release(struct inode *inode, struct file *file) 4688 { 4689 struct drm_i915_private *dev_priv = inode->i_private; 4690 4691 if (INTEL_GEN(dev_priv) < 6) 4692 return 0; 4693 4694 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4695 intel_runtime_pm_put(dev_priv); 4696 4697 return 0; 4698 } 4699 4700 static const struct file_operations i915_forcewake_fops = { 4701 .owner = THIS_MODULE, 4702 .open = i915_forcewake_open, 4703 .release = i915_forcewake_release, 4704 }; 4705 4706 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) 4707 { 4708 struct drm_i915_private *dev_priv = m->private; 4709 struct i915_hotplug *hotplug = &dev_priv->hotplug; 4710 4711 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); 4712 seq_printf(m, "Detected: %s\n", 4713 yesno(delayed_work_pending(&hotplug->reenable_work))); 4714 4715 return 0; 4716 } 4717 4718 static ssize_t i915_hpd_storm_ctl_write(struct file *file, 4719 const char __user *ubuf, size_t len, 4720 loff_t *offp) 4721 { 4722 struct seq_file *m = file->private_data; 4723 struct drm_i915_private *dev_priv = m->private; 4724 struct i915_hotplug *hotplug = &dev_priv->hotplug; 4725 unsigned int new_threshold; 4726 int i; 4727 char *newline; 4728 char tmp[16]; 4729 4730 if (len >= sizeof(tmp)) 4731 return -EINVAL; 4732 4733 if (copy_from_user(tmp, ubuf, len)) 4734 return -EFAULT; 4735 4736 tmp[len] = '\0'; 4737 4738 /* Strip newline, if any */ 4739 newline = strchr(tmp, '\n'); 4740 if (newline) 4741 *newline = '\0'; 4742 4743 if (strcmp(tmp, "reset") == 0) 4744 new_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4745 else if (kstrtouint(tmp, 10, &new_threshold) != 0) 4746 return -EINVAL; 4747 4748 if (new_threshold > 0) 4749 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n", 4750 new_threshold); 4751 else 4752 DRM_DEBUG_KMS("Disabling HPD storm detection\n"); 4753 4754 spin_lock_irq(&dev_priv->irq_lock); 4755 hotplug->hpd_storm_threshold = new_threshold; 4756 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 4757 for_each_hpd_pin(i) 4758 hotplug->stats[i].count = 0; 4759 spin_unlock_irq(&dev_priv->irq_lock); 4760 4761 /* Re-enable hpd immediately if we were in an irq storm */ 4762 flush_delayed_work(&dev_priv->hotplug.reenable_work); 4763 4764 return len; 4765 } 4766 4767 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file) 4768 { 4769 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private); 4770 } 4771 4772 static const struct file_operations i915_hpd_storm_ctl_fops = { 4773 .owner = THIS_MODULE, 4774 .open = i915_hpd_storm_ctl_open, 4775 .read = seq_read, 4776 .llseek = seq_lseek, 4777 .release = single_release, 4778 .write = i915_hpd_storm_ctl_write 4779 }; 4780 4781 static const struct drm_info_list i915_debugfs_list[] = { 4782 {"i915_capabilities", i915_capabilities, 0}, 4783 {"i915_gem_objects", i915_gem_object_info, 0}, 4784 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 4785 {"i915_gem_pin_display", i915_gem_gtt_info, 0, (void *)1}, 4786 {"i915_gem_stolen", i915_gem_stolen_list_info }, 4787 {"i915_gem_request", i915_gem_request_info, 0}, 4788 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 4789 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 4790 {"i915_gem_interrupt", i915_interrupt_info, 0}, 4791 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, 4792 {"i915_guc_info", i915_guc_info, 0}, 4793 {"i915_guc_load_status", i915_guc_load_status_info, 0}, 4794 {"i915_guc_log_dump", i915_guc_log_dump, 0}, 4795 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1}, 4796 {"i915_guc_stage_pool", i915_guc_stage_pool, 0}, 4797 {"i915_huc_load_status", i915_huc_load_status_info, 0}, 4798 {"i915_frequency_info", i915_frequency_info, 0}, 4799 {"i915_hangcheck_info", i915_hangcheck_info, 0}, 4800 {"i915_reset_info", i915_reset_info, 0}, 4801 {"i915_drpc_info", i915_drpc_info, 0}, 4802 {"i915_emon_status", i915_emon_status, 0}, 4803 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 4804 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 4805 {"i915_fbc_status", i915_fbc_status, 0}, 4806 {"i915_ips_status", i915_ips_status, 0}, 4807 {"i915_sr_status", i915_sr_status, 0}, 4808 {"i915_opregion", i915_opregion, 0}, 4809 {"i915_vbt", i915_vbt, 0}, 4810 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 4811 {"i915_context_status", i915_context_status, 0}, 4812 {"i915_dump_lrc", i915_dump_lrc, 0}, 4813 {"i915_forcewake_domains", i915_forcewake_domains, 0}, 4814 {"i915_swizzle_info", i915_swizzle_info, 0}, 4815 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 4816 {"i915_llc", i915_llc, 0}, 4817 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 4818 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 4819 {"i915_energy_uJ", i915_energy_uJ, 0}, 4820 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 4821 {"i915_power_domain_info", i915_power_domain_info, 0}, 4822 {"i915_dmc_info", i915_dmc_info, 0}, 4823 {"i915_display_info", i915_display_info, 0}, 4824 {"i915_engine_info", i915_engine_info, 0}, 4825 {"i915_semaphore_status", i915_semaphore_status, 0}, 4826 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 4827 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4828 {"i915_wa_registers", i915_wa_registers, 0}, 4829 {"i915_ddb_info", i915_ddb_info, 0}, 4830 {"i915_sseu_status", i915_sseu_status, 0}, 4831 {"i915_drrs_status", i915_drrs_status, 0}, 4832 {"i915_rps_boost_info", i915_rps_boost_info, 0}, 4833 }; 4834 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4835 4836 static const struct i915_debugfs_files { 4837 const char *name; 4838 const struct file_operations *fops; 4839 } i915_debugfs_files[] = { 4840 {"i915_wedged", &i915_wedged_fops}, 4841 {"i915_max_freq", &i915_max_freq_fops}, 4842 {"i915_min_freq", &i915_min_freq_fops}, 4843 {"i915_cache_sharing", &i915_cache_sharing_fops}, 4844 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 4845 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 4846 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 4847 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 4848 {"i915_error_state", &i915_error_state_fops}, 4849 {"i915_gpu_info", &i915_gpu_info_fops}, 4850 #endif 4851 {"i915_next_seqno", &i915_next_seqno_fops}, 4852 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 4853 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 4854 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 4855 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 4856 {"i915_fbc_false_color", &i915_fbc_false_color_fops}, 4857 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 4858 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 4859 {"i915_dp_test_active", &i915_displayport_test_active_fops}, 4860 {"i915_guc_log_control", &i915_guc_log_control_fops}, 4861 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops} 4862 }; 4863 4864 int i915_debugfs_register(struct drm_i915_private *dev_priv) 4865 { 4866 struct drm_minor *minor = dev_priv->drm.primary; 4867 struct dentry *ent; 4868 int ret, i; 4869 4870 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR, 4871 minor->debugfs_root, to_i915(minor->dev), 4872 &i915_forcewake_fops); 4873 if (!ent) 4874 return -ENOMEM; 4875 4876 ret = intel_pipe_crc_create(minor); 4877 if (ret) 4878 return ret; 4879 4880 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 4881 ent = debugfs_create_file(i915_debugfs_files[i].name, 4882 S_IRUGO | S_IWUSR, 4883 minor->debugfs_root, 4884 to_i915(minor->dev), 4885 i915_debugfs_files[i].fops); 4886 if (!ent) 4887 return -ENOMEM; 4888 } 4889 4890 return drm_debugfs_create_files(i915_debugfs_list, 4891 I915_DEBUGFS_ENTRIES, 4892 minor->debugfs_root, minor); 4893 } 4894 4895 struct dpcd_block { 4896 /* DPCD dump start address. */ 4897 unsigned int offset; 4898 /* DPCD dump end address, inclusive. If unset, .size will be used. */ 4899 unsigned int end; 4900 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ 4901 size_t size; 4902 /* Only valid for eDP. */ 4903 bool edp; 4904 }; 4905 4906 static const struct dpcd_block i915_dpcd_debug[] = { 4907 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, 4908 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, 4909 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, 4910 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, 4911 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, 4912 { .offset = DP_SET_POWER }, 4913 { .offset = DP_EDP_DPCD_REV }, 4914 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, 4915 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, 4916 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, 4917 }; 4918 4919 static int i915_dpcd_show(struct seq_file *m, void *data) 4920 { 4921 struct drm_connector *connector = m->private; 4922 struct intel_dp *intel_dp = 4923 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 4924 uint8_t buf[16]; 4925 ssize_t err; 4926 int i; 4927 4928 if (connector->status != connector_status_connected) 4929 return -ENODEV; 4930 4931 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) { 4932 const struct dpcd_block *b = &i915_dpcd_debug[i]; 4933 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); 4934 4935 if (b->edp && 4936 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 4937 continue; 4938 4939 /* low tech for now */ 4940 if (WARN_ON(size > sizeof(buf))) 4941 continue; 4942 4943 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); 4944 if (err <= 0) { 4945 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", 4946 size, b->offset, err); 4947 continue; 4948 } 4949 4950 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); 4951 } 4952 4953 return 0; 4954 } 4955 4956 static int i915_dpcd_open(struct inode *inode, struct file *file) 4957 { 4958 return single_open(file, i915_dpcd_show, inode->i_private); 4959 } 4960 4961 static const struct file_operations i915_dpcd_fops = { 4962 .owner = THIS_MODULE, 4963 .open = i915_dpcd_open, 4964 .read = seq_read, 4965 .llseek = seq_lseek, 4966 .release = single_release, 4967 }; 4968 4969 static int i915_panel_show(struct seq_file *m, void *data) 4970 { 4971 struct drm_connector *connector = m->private; 4972 struct intel_dp *intel_dp = 4973 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 4974 4975 if (connector->status != connector_status_connected) 4976 return -ENODEV; 4977 4978 seq_printf(m, "Panel power up delay: %d\n", 4979 intel_dp->panel_power_up_delay); 4980 seq_printf(m, "Panel power down delay: %d\n", 4981 intel_dp->panel_power_down_delay); 4982 seq_printf(m, "Backlight on delay: %d\n", 4983 intel_dp->backlight_on_delay); 4984 seq_printf(m, "Backlight off delay: %d\n", 4985 intel_dp->backlight_off_delay); 4986 4987 return 0; 4988 } 4989 4990 static int i915_panel_open(struct inode *inode, struct file *file) 4991 { 4992 return single_open(file, i915_panel_show, inode->i_private); 4993 } 4994 4995 static const struct file_operations i915_panel_fops = { 4996 .owner = THIS_MODULE, 4997 .open = i915_panel_open, 4998 .read = seq_read, 4999 .llseek = seq_lseek, 5000 .release = single_release, 5001 }; 5002 5003 /** 5004 * i915_debugfs_connector_add - add i915 specific connector debugfs files 5005 * @connector: pointer to a registered drm_connector 5006 * 5007 * Cleanup will be done by drm_connector_unregister() through a call to 5008 * drm_debugfs_connector_remove(). 5009 * 5010 * Returns 0 on success, negative error codes on error. 5011 */ 5012 int i915_debugfs_connector_add(struct drm_connector *connector) 5013 { 5014 struct dentry *root = connector->debugfs_entry; 5015 5016 /* The connector must have been registered beforehands. */ 5017 if (!root) 5018 return -ENODEV; 5019 5020 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 5021 connector->connector_type == DRM_MODE_CONNECTOR_eDP) 5022 debugfs_create_file("i915_dpcd", S_IRUGO, root, 5023 connector, &i915_dpcd_fops); 5024 5025 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 5026 debugfs_create_file("i915_panel_timings", S_IRUGO, root, 5027 connector, &i915_panel_fops); 5028 5029 return 0; 5030 } 5031