1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Keith Packard <keithp@keithp.com> 26 * 27 */ 28 29 #include <linux/debugfs.h> 30 #include <linux/sort.h> 31 #include <linux/sched/mm.h> 32 #include "intel_drv.h" 33 #include "intel_guc_submission.h" 34 35 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) 36 { 37 return to_i915(node->minor->dev); 38 } 39 40 static int i915_capabilities(struct seq_file *m, void *data) 41 { 42 struct drm_i915_private *dev_priv = node_to_i915(m->private); 43 const struct intel_device_info *info = INTEL_INFO(dev_priv); 44 struct drm_printer p = drm_seq_file_printer(m); 45 46 seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv)); 47 seq_printf(m, "platform: %s\n", intel_platform_name(info->platform)); 48 seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv)); 49 50 intel_device_info_dump_flags(info, &p); 51 intel_device_info_dump_runtime(info, &p); 52 53 kernel_param_lock(THIS_MODULE); 54 i915_params_dump(&i915_modparams, &p); 55 kernel_param_unlock(THIS_MODULE); 56 57 return 0; 58 } 59 60 static char get_active_flag(struct drm_i915_gem_object *obj) 61 { 62 return i915_gem_object_is_active(obj) ? '*' : ' '; 63 } 64 65 static char get_pin_flag(struct drm_i915_gem_object *obj) 66 { 67 return obj->pin_global ? 'p' : ' '; 68 } 69 70 static char get_tiling_flag(struct drm_i915_gem_object *obj) 71 { 72 switch (i915_gem_object_get_tiling(obj)) { 73 default: 74 case I915_TILING_NONE: return ' '; 75 case I915_TILING_X: return 'X'; 76 case I915_TILING_Y: return 'Y'; 77 } 78 } 79 80 static char get_global_flag(struct drm_i915_gem_object *obj) 81 { 82 return obj->userfault_count ? 'g' : ' '; 83 } 84 85 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj) 86 { 87 return obj->mm.mapping ? 'M' : ' '; 88 } 89 90 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) 91 { 92 u64 size = 0; 93 struct i915_vma *vma; 94 95 for_each_ggtt_vma(vma, obj) { 96 if (drm_mm_node_allocated(&vma->node)) 97 size += vma->node.size; 98 } 99 100 return size; 101 } 102 103 static const char * 104 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len) 105 { 106 size_t x = 0; 107 108 switch (page_sizes) { 109 case 0: 110 return ""; 111 case I915_GTT_PAGE_SIZE_4K: 112 return "4K"; 113 case I915_GTT_PAGE_SIZE_64K: 114 return "64K"; 115 case I915_GTT_PAGE_SIZE_2M: 116 return "2M"; 117 default: 118 if (!buf) 119 return "M"; 120 121 if (page_sizes & I915_GTT_PAGE_SIZE_2M) 122 x += snprintf(buf + x, len - x, "2M, "); 123 if (page_sizes & I915_GTT_PAGE_SIZE_64K) 124 x += snprintf(buf + x, len - x, "64K, "); 125 if (page_sizes & I915_GTT_PAGE_SIZE_4K) 126 x += snprintf(buf + x, len - x, "4K, "); 127 buf[x-2] = '\0'; 128 129 return buf; 130 } 131 } 132 133 static void 134 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 135 { 136 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 137 struct intel_engine_cs *engine; 138 struct i915_vma *vma; 139 unsigned int frontbuffer_bits; 140 int pin_count = 0; 141 142 lockdep_assert_held(&obj->base.dev->struct_mutex); 143 144 seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s", 145 &obj->base, 146 get_active_flag(obj), 147 get_pin_flag(obj), 148 get_tiling_flag(obj), 149 get_global_flag(obj), 150 get_pin_mapped_flag(obj), 151 obj->base.size / 1024, 152 obj->base.read_domains, 153 obj->base.write_domain, 154 i915_cache_level_str(dev_priv, obj->cache_level), 155 obj->mm.dirty ? " dirty" : "", 156 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : ""); 157 if (obj->base.name) 158 seq_printf(m, " (name: %d)", obj->base.name); 159 list_for_each_entry(vma, &obj->vma_list, obj_link) { 160 if (i915_vma_is_pinned(vma)) 161 pin_count++; 162 } 163 seq_printf(m, " (pinned x %d)", pin_count); 164 if (obj->pin_global) 165 seq_printf(m, " (global)"); 166 list_for_each_entry(vma, &obj->vma_list, obj_link) { 167 if (!drm_mm_node_allocated(&vma->node)) 168 continue; 169 170 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s", 171 i915_vma_is_ggtt(vma) ? "g" : "pp", 172 vma->node.start, vma->node.size, 173 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0)); 174 if (i915_vma_is_ggtt(vma)) { 175 switch (vma->ggtt_view.type) { 176 case I915_GGTT_VIEW_NORMAL: 177 seq_puts(m, ", normal"); 178 break; 179 180 case I915_GGTT_VIEW_PARTIAL: 181 seq_printf(m, ", partial [%08llx+%x]", 182 vma->ggtt_view.partial.offset << PAGE_SHIFT, 183 vma->ggtt_view.partial.size << PAGE_SHIFT); 184 break; 185 186 case I915_GGTT_VIEW_ROTATED: 187 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]", 188 vma->ggtt_view.rotated.plane[0].width, 189 vma->ggtt_view.rotated.plane[0].height, 190 vma->ggtt_view.rotated.plane[0].stride, 191 vma->ggtt_view.rotated.plane[0].offset, 192 vma->ggtt_view.rotated.plane[1].width, 193 vma->ggtt_view.rotated.plane[1].height, 194 vma->ggtt_view.rotated.plane[1].stride, 195 vma->ggtt_view.rotated.plane[1].offset); 196 break; 197 198 default: 199 MISSING_CASE(vma->ggtt_view.type); 200 break; 201 } 202 } 203 if (vma->fence) 204 seq_printf(m, " , fence: %d%s", 205 vma->fence->id, 206 i915_gem_active_isset(&vma->last_fence) ? "*" : ""); 207 seq_puts(m, ")"); 208 } 209 if (obj->stolen) 210 seq_printf(m, " (stolen: %08llx)", obj->stolen->start); 211 212 engine = i915_gem_object_last_write_engine(obj); 213 if (engine) 214 seq_printf(m, " (%s)", engine->name); 215 216 frontbuffer_bits = atomic_read(&obj->frontbuffer_bits); 217 if (frontbuffer_bits) 218 seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits); 219 } 220 221 static int obj_rank_by_stolen(const void *A, const void *B) 222 { 223 const struct drm_i915_gem_object *a = 224 *(const struct drm_i915_gem_object **)A; 225 const struct drm_i915_gem_object *b = 226 *(const struct drm_i915_gem_object **)B; 227 228 if (a->stolen->start < b->stolen->start) 229 return -1; 230 if (a->stolen->start > b->stolen->start) 231 return 1; 232 return 0; 233 } 234 235 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) 236 { 237 struct drm_i915_private *dev_priv = node_to_i915(m->private); 238 struct drm_device *dev = &dev_priv->drm; 239 struct drm_i915_gem_object **objects; 240 struct drm_i915_gem_object *obj; 241 u64 total_obj_size, total_gtt_size; 242 unsigned long total, count, n; 243 int ret; 244 245 total = READ_ONCE(dev_priv->mm.object_count); 246 objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL); 247 if (!objects) 248 return -ENOMEM; 249 250 ret = mutex_lock_interruptible(&dev->struct_mutex); 251 if (ret) 252 goto out; 253 254 total_obj_size = total_gtt_size = count = 0; 255 256 spin_lock(&dev_priv->mm.obj_lock); 257 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) { 258 if (count == total) 259 break; 260 261 if (obj->stolen == NULL) 262 continue; 263 264 objects[count++] = obj; 265 total_obj_size += obj->base.size; 266 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 267 268 } 269 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) { 270 if (count == total) 271 break; 272 273 if (obj->stolen == NULL) 274 continue; 275 276 objects[count++] = obj; 277 total_obj_size += obj->base.size; 278 } 279 spin_unlock(&dev_priv->mm.obj_lock); 280 281 sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL); 282 283 seq_puts(m, "Stolen:\n"); 284 for (n = 0; n < count; n++) { 285 seq_puts(m, " "); 286 describe_obj(m, objects[n]); 287 seq_putc(m, '\n'); 288 } 289 seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n", 290 count, total_obj_size, total_gtt_size); 291 292 mutex_unlock(&dev->struct_mutex); 293 out: 294 kvfree(objects); 295 return ret; 296 } 297 298 struct file_stats { 299 struct drm_i915_file_private *file_priv; 300 unsigned long count; 301 u64 total, unbound; 302 u64 global, shared; 303 u64 active, inactive; 304 }; 305 306 static int per_file_stats(int id, void *ptr, void *data) 307 { 308 struct drm_i915_gem_object *obj = ptr; 309 struct file_stats *stats = data; 310 struct i915_vma *vma; 311 312 lockdep_assert_held(&obj->base.dev->struct_mutex); 313 314 stats->count++; 315 stats->total += obj->base.size; 316 if (!obj->bind_count) 317 stats->unbound += obj->base.size; 318 if (obj->base.name || obj->base.dma_buf) 319 stats->shared += obj->base.size; 320 321 list_for_each_entry(vma, &obj->vma_list, obj_link) { 322 if (!drm_mm_node_allocated(&vma->node)) 323 continue; 324 325 if (i915_vma_is_ggtt(vma)) { 326 stats->global += vma->node.size; 327 } else { 328 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm); 329 330 if (ppgtt->base.file != stats->file_priv) 331 continue; 332 } 333 334 if (i915_vma_is_active(vma)) 335 stats->active += vma->node.size; 336 else 337 stats->inactive += vma->node.size; 338 } 339 340 return 0; 341 } 342 343 #define print_file_stats(m, name, stats) do { \ 344 if (stats.count) \ 345 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \ 346 name, \ 347 stats.count, \ 348 stats.total, \ 349 stats.active, \ 350 stats.inactive, \ 351 stats.global, \ 352 stats.shared, \ 353 stats.unbound); \ 354 } while (0) 355 356 static void print_batch_pool_stats(struct seq_file *m, 357 struct drm_i915_private *dev_priv) 358 { 359 struct drm_i915_gem_object *obj; 360 struct file_stats stats; 361 struct intel_engine_cs *engine; 362 enum intel_engine_id id; 363 int j; 364 365 memset(&stats, 0, sizeof(stats)); 366 367 for_each_engine(engine, dev_priv, id) { 368 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { 369 list_for_each_entry(obj, 370 &engine->batch_pool.cache_list[j], 371 batch_pool_link) 372 per_file_stats(0, obj, &stats); 373 } 374 } 375 376 print_file_stats(m, "[k]batch pool", stats); 377 } 378 379 static int per_file_ctx_stats(int id, void *ptr, void *data) 380 { 381 struct i915_gem_context *ctx = ptr; 382 int n; 383 384 for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) { 385 if (ctx->engine[n].state) 386 per_file_stats(0, ctx->engine[n].state->obj, data); 387 if (ctx->engine[n].ring) 388 per_file_stats(0, ctx->engine[n].ring->vma->obj, data); 389 } 390 391 return 0; 392 } 393 394 static void print_context_stats(struct seq_file *m, 395 struct drm_i915_private *dev_priv) 396 { 397 struct drm_device *dev = &dev_priv->drm; 398 struct file_stats stats; 399 struct drm_file *file; 400 401 memset(&stats, 0, sizeof(stats)); 402 403 mutex_lock(&dev->struct_mutex); 404 if (dev_priv->kernel_context) 405 per_file_ctx_stats(0, dev_priv->kernel_context, &stats); 406 407 list_for_each_entry(file, &dev->filelist, lhead) { 408 struct drm_i915_file_private *fpriv = file->driver_priv; 409 idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats); 410 } 411 mutex_unlock(&dev->struct_mutex); 412 413 print_file_stats(m, "[k]contexts", stats); 414 } 415 416 static int i915_gem_object_info(struct seq_file *m, void *data) 417 { 418 struct drm_i915_private *dev_priv = node_to_i915(m->private); 419 struct drm_device *dev = &dev_priv->drm; 420 struct i915_ggtt *ggtt = &dev_priv->ggtt; 421 u32 count, mapped_count, purgeable_count, dpy_count, huge_count; 422 u64 size, mapped_size, purgeable_size, dpy_size, huge_size; 423 struct drm_i915_gem_object *obj; 424 unsigned int page_sizes = 0; 425 struct drm_file *file; 426 char buf[80]; 427 int ret; 428 429 ret = mutex_lock_interruptible(&dev->struct_mutex); 430 if (ret) 431 return ret; 432 433 seq_printf(m, "%u objects, %llu bytes\n", 434 dev_priv->mm.object_count, 435 dev_priv->mm.object_memory); 436 437 size = count = 0; 438 mapped_size = mapped_count = 0; 439 purgeable_size = purgeable_count = 0; 440 huge_size = huge_count = 0; 441 442 spin_lock(&dev_priv->mm.obj_lock); 443 list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) { 444 size += obj->base.size; 445 ++count; 446 447 if (obj->mm.madv == I915_MADV_DONTNEED) { 448 purgeable_size += obj->base.size; 449 ++purgeable_count; 450 } 451 452 if (obj->mm.mapping) { 453 mapped_count++; 454 mapped_size += obj->base.size; 455 } 456 457 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) { 458 huge_count++; 459 huge_size += obj->base.size; 460 page_sizes |= obj->mm.page_sizes.sg; 461 } 462 } 463 seq_printf(m, "%u unbound objects, %llu bytes\n", count, size); 464 465 size = count = dpy_size = dpy_count = 0; 466 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) { 467 size += obj->base.size; 468 ++count; 469 470 if (obj->pin_global) { 471 dpy_size += obj->base.size; 472 ++dpy_count; 473 } 474 475 if (obj->mm.madv == I915_MADV_DONTNEED) { 476 purgeable_size += obj->base.size; 477 ++purgeable_count; 478 } 479 480 if (obj->mm.mapping) { 481 mapped_count++; 482 mapped_size += obj->base.size; 483 } 484 485 if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) { 486 huge_count++; 487 huge_size += obj->base.size; 488 page_sizes |= obj->mm.page_sizes.sg; 489 } 490 } 491 spin_unlock(&dev_priv->mm.obj_lock); 492 493 seq_printf(m, "%u bound objects, %llu bytes\n", 494 count, size); 495 seq_printf(m, "%u purgeable objects, %llu bytes\n", 496 purgeable_count, purgeable_size); 497 seq_printf(m, "%u mapped objects, %llu bytes\n", 498 mapped_count, mapped_size); 499 seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n", 500 huge_count, 501 stringify_page_sizes(page_sizes, buf, sizeof(buf)), 502 huge_size); 503 seq_printf(m, "%u display objects (globally pinned), %llu bytes\n", 504 dpy_count, dpy_size); 505 506 seq_printf(m, "%llu [%pa] gtt total\n", 507 ggtt->base.total, &ggtt->mappable_end); 508 seq_printf(m, "Supported page sizes: %s\n", 509 stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes, 510 buf, sizeof(buf))); 511 512 seq_putc(m, '\n'); 513 print_batch_pool_stats(m, dev_priv); 514 mutex_unlock(&dev->struct_mutex); 515 516 mutex_lock(&dev->filelist_mutex); 517 print_context_stats(m, dev_priv); 518 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 519 struct file_stats stats; 520 struct drm_i915_file_private *file_priv = file->driver_priv; 521 struct drm_i915_gem_request *request; 522 struct task_struct *task; 523 524 mutex_lock(&dev->struct_mutex); 525 526 memset(&stats, 0, sizeof(stats)); 527 stats.file_priv = file->driver_priv; 528 spin_lock(&file->table_lock); 529 idr_for_each(&file->object_idr, per_file_stats, &stats); 530 spin_unlock(&file->table_lock); 531 /* 532 * Although we have a valid reference on file->pid, that does 533 * not guarantee that the task_struct who called get_pid() is 534 * still alive (e.g. get_pid(current) => fork() => exit()). 535 * Therefore, we need to protect this ->comm access using RCU. 536 */ 537 request = list_first_entry_or_null(&file_priv->mm.request_list, 538 struct drm_i915_gem_request, 539 client_link); 540 rcu_read_lock(); 541 task = pid_task(request && request->ctx->pid ? 542 request->ctx->pid : file->pid, 543 PIDTYPE_PID); 544 print_file_stats(m, task ? task->comm : "<unknown>", stats); 545 rcu_read_unlock(); 546 547 mutex_unlock(&dev->struct_mutex); 548 } 549 mutex_unlock(&dev->filelist_mutex); 550 551 return 0; 552 } 553 554 static int i915_gem_gtt_info(struct seq_file *m, void *data) 555 { 556 struct drm_info_node *node = m->private; 557 struct drm_i915_private *dev_priv = node_to_i915(node); 558 struct drm_device *dev = &dev_priv->drm; 559 struct drm_i915_gem_object **objects; 560 struct drm_i915_gem_object *obj; 561 u64 total_obj_size, total_gtt_size; 562 unsigned long nobject, n; 563 int count, ret; 564 565 nobject = READ_ONCE(dev_priv->mm.object_count); 566 objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL); 567 if (!objects) 568 return -ENOMEM; 569 570 ret = mutex_lock_interruptible(&dev->struct_mutex); 571 if (ret) 572 return ret; 573 574 count = 0; 575 spin_lock(&dev_priv->mm.obj_lock); 576 list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) { 577 objects[count++] = obj; 578 if (count == nobject) 579 break; 580 } 581 spin_unlock(&dev_priv->mm.obj_lock); 582 583 total_obj_size = total_gtt_size = 0; 584 for (n = 0; n < count; n++) { 585 obj = objects[n]; 586 587 seq_puts(m, " "); 588 describe_obj(m, obj); 589 seq_putc(m, '\n'); 590 total_obj_size += obj->base.size; 591 total_gtt_size += i915_gem_obj_total_ggtt_size(obj); 592 } 593 594 mutex_unlock(&dev->struct_mutex); 595 596 seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n", 597 count, total_obj_size, total_gtt_size); 598 kvfree(objects); 599 600 return 0; 601 } 602 603 static int i915_gem_batch_pool_info(struct seq_file *m, void *data) 604 { 605 struct drm_i915_private *dev_priv = node_to_i915(m->private); 606 struct drm_device *dev = &dev_priv->drm; 607 struct drm_i915_gem_object *obj; 608 struct intel_engine_cs *engine; 609 enum intel_engine_id id; 610 int total = 0; 611 int ret, j; 612 613 ret = mutex_lock_interruptible(&dev->struct_mutex); 614 if (ret) 615 return ret; 616 617 for_each_engine(engine, dev_priv, id) { 618 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { 619 int count; 620 621 count = 0; 622 list_for_each_entry(obj, 623 &engine->batch_pool.cache_list[j], 624 batch_pool_link) 625 count++; 626 seq_printf(m, "%s cache[%d]: %d objects\n", 627 engine->name, j, count); 628 629 list_for_each_entry(obj, 630 &engine->batch_pool.cache_list[j], 631 batch_pool_link) { 632 seq_puts(m, " "); 633 describe_obj(m, obj); 634 seq_putc(m, '\n'); 635 } 636 637 total += count; 638 } 639 } 640 641 seq_printf(m, "total: %d\n", total); 642 643 mutex_unlock(&dev->struct_mutex); 644 645 return 0; 646 } 647 648 static int i915_interrupt_info(struct seq_file *m, void *data) 649 { 650 struct drm_i915_private *dev_priv = node_to_i915(m->private); 651 struct intel_engine_cs *engine; 652 enum intel_engine_id id; 653 int i, pipe; 654 655 intel_runtime_pm_get(dev_priv); 656 657 if (IS_CHERRYVIEW(dev_priv)) { 658 seq_printf(m, "Master Interrupt Control:\t%08x\n", 659 I915_READ(GEN8_MASTER_IRQ)); 660 661 seq_printf(m, "Display IER:\t%08x\n", 662 I915_READ(VLV_IER)); 663 seq_printf(m, "Display IIR:\t%08x\n", 664 I915_READ(VLV_IIR)); 665 seq_printf(m, "Display IIR_RW:\t%08x\n", 666 I915_READ(VLV_IIR_RW)); 667 seq_printf(m, "Display IMR:\t%08x\n", 668 I915_READ(VLV_IMR)); 669 for_each_pipe(dev_priv, pipe) { 670 enum intel_display_power_domain power_domain; 671 672 power_domain = POWER_DOMAIN_PIPE(pipe); 673 if (!intel_display_power_get_if_enabled(dev_priv, 674 power_domain)) { 675 seq_printf(m, "Pipe %c power disabled\n", 676 pipe_name(pipe)); 677 continue; 678 } 679 680 seq_printf(m, "Pipe %c stat:\t%08x\n", 681 pipe_name(pipe), 682 I915_READ(PIPESTAT(pipe))); 683 684 intel_display_power_put(dev_priv, power_domain); 685 } 686 687 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 688 seq_printf(m, "Port hotplug:\t%08x\n", 689 I915_READ(PORT_HOTPLUG_EN)); 690 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 691 I915_READ(VLV_DPFLIPSTAT)); 692 seq_printf(m, "DPINVGTT:\t%08x\n", 693 I915_READ(DPINVGTT)); 694 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 695 696 for (i = 0; i < 4; i++) { 697 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 698 i, I915_READ(GEN8_GT_IMR(i))); 699 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 700 i, I915_READ(GEN8_GT_IIR(i))); 701 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 702 i, I915_READ(GEN8_GT_IER(i))); 703 } 704 705 seq_printf(m, "PCU interrupt mask:\t%08x\n", 706 I915_READ(GEN8_PCU_IMR)); 707 seq_printf(m, "PCU interrupt identity:\t%08x\n", 708 I915_READ(GEN8_PCU_IIR)); 709 seq_printf(m, "PCU interrupt enable:\t%08x\n", 710 I915_READ(GEN8_PCU_IER)); 711 } else if (INTEL_GEN(dev_priv) >= 8) { 712 seq_printf(m, "Master Interrupt Control:\t%08x\n", 713 I915_READ(GEN8_MASTER_IRQ)); 714 715 for (i = 0; i < 4; i++) { 716 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", 717 i, I915_READ(GEN8_GT_IMR(i))); 718 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", 719 i, I915_READ(GEN8_GT_IIR(i))); 720 seq_printf(m, "GT Interrupt IER %d:\t%08x\n", 721 i, I915_READ(GEN8_GT_IER(i))); 722 } 723 724 for_each_pipe(dev_priv, pipe) { 725 enum intel_display_power_domain power_domain; 726 727 power_domain = POWER_DOMAIN_PIPE(pipe); 728 if (!intel_display_power_get_if_enabled(dev_priv, 729 power_domain)) { 730 seq_printf(m, "Pipe %c power disabled\n", 731 pipe_name(pipe)); 732 continue; 733 } 734 seq_printf(m, "Pipe %c IMR:\t%08x\n", 735 pipe_name(pipe), 736 I915_READ(GEN8_DE_PIPE_IMR(pipe))); 737 seq_printf(m, "Pipe %c IIR:\t%08x\n", 738 pipe_name(pipe), 739 I915_READ(GEN8_DE_PIPE_IIR(pipe))); 740 seq_printf(m, "Pipe %c IER:\t%08x\n", 741 pipe_name(pipe), 742 I915_READ(GEN8_DE_PIPE_IER(pipe))); 743 744 intel_display_power_put(dev_priv, power_domain); 745 } 746 747 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", 748 I915_READ(GEN8_DE_PORT_IMR)); 749 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", 750 I915_READ(GEN8_DE_PORT_IIR)); 751 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", 752 I915_READ(GEN8_DE_PORT_IER)); 753 754 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", 755 I915_READ(GEN8_DE_MISC_IMR)); 756 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", 757 I915_READ(GEN8_DE_MISC_IIR)); 758 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", 759 I915_READ(GEN8_DE_MISC_IER)); 760 761 seq_printf(m, "PCU interrupt mask:\t%08x\n", 762 I915_READ(GEN8_PCU_IMR)); 763 seq_printf(m, "PCU interrupt identity:\t%08x\n", 764 I915_READ(GEN8_PCU_IIR)); 765 seq_printf(m, "PCU interrupt enable:\t%08x\n", 766 I915_READ(GEN8_PCU_IER)); 767 } else if (IS_VALLEYVIEW(dev_priv)) { 768 seq_printf(m, "Display IER:\t%08x\n", 769 I915_READ(VLV_IER)); 770 seq_printf(m, "Display IIR:\t%08x\n", 771 I915_READ(VLV_IIR)); 772 seq_printf(m, "Display IIR_RW:\t%08x\n", 773 I915_READ(VLV_IIR_RW)); 774 seq_printf(m, "Display IMR:\t%08x\n", 775 I915_READ(VLV_IMR)); 776 for_each_pipe(dev_priv, pipe) { 777 enum intel_display_power_domain power_domain; 778 779 power_domain = POWER_DOMAIN_PIPE(pipe); 780 if (!intel_display_power_get_if_enabled(dev_priv, 781 power_domain)) { 782 seq_printf(m, "Pipe %c power disabled\n", 783 pipe_name(pipe)); 784 continue; 785 } 786 787 seq_printf(m, "Pipe %c stat:\t%08x\n", 788 pipe_name(pipe), 789 I915_READ(PIPESTAT(pipe))); 790 intel_display_power_put(dev_priv, power_domain); 791 } 792 793 seq_printf(m, "Master IER:\t%08x\n", 794 I915_READ(VLV_MASTER_IER)); 795 796 seq_printf(m, "Render IER:\t%08x\n", 797 I915_READ(GTIER)); 798 seq_printf(m, "Render IIR:\t%08x\n", 799 I915_READ(GTIIR)); 800 seq_printf(m, "Render IMR:\t%08x\n", 801 I915_READ(GTIMR)); 802 803 seq_printf(m, "PM IER:\t\t%08x\n", 804 I915_READ(GEN6_PMIER)); 805 seq_printf(m, "PM IIR:\t\t%08x\n", 806 I915_READ(GEN6_PMIIR)); 807 seq_printf(m, "PM IMR:\t\t%08x\n", 808 I915_READ(GEN6_PMIMR)); 809 810 seq_printf(m, "Port hotplug:\t%08x\n", 811 I915_READ(PORT_HOTPLUG_EN)); 812 seq_printf(m, "DPFLIPSTAT:\t%08x\n", 813 I915_READ(VLV_DPFLIPSTAT)); 814 seq_printf(m, "DPINVGTT:\t%08x\n", 815 I915_READ(DPINVGTT)); 816 817 } else if (!HAS_PCH_SPLIT(dev_priv)) { 818 seq_printf(m, "Interrupt enable: %08x\n", 819 I915_READ(IER)); 820 seq_printf(m, "Interrupt identity: %08x\n", 821 I915_READ(IIR)); 822 seq_printf(m, "Interrupt mask: %08x\n", 823 I915_READ(IMR)); 824 for_each_pipe(dev_priv, pipe) 825 seq_printf(m, "Pipe %c stat: %08x\n", 826 pipe_name(pipe), 827 I915_READ(PIPESTAT(pipe))); 828 } else { 829 seq_printf(m, "North Display Interrupt enable: %08x\n", 830 I915_READ(DEIER)); 831 seq_printf(m, "North Display Interrupt identity: %08x\n", 832 I915_READ(DEIIR)); 833 seq_printf(m, "North Display Interrupt mask: %08x\n", 834 I915_READ(DEIMR)); 835 seq_printf(m, "South Display Interrupt enable: %08x\n", 836 I915_READ(SDEIER)); 837 seq_printf(m, "South Display Interrupt identity: %08x\n", 838 I915_READ(SDEIIR)); 839 seq_printf(m, "South Display Interrupt mask: %08x\n", 840 I915_READ(SDEIMR)); 841 seq_printf(m, "Graphics Interrupt enable: %08x\n", 842 I915_READ(GTIER)); 843 seq_printf(m, "Graphics Interrupt identity: %08x\n", 844 I915_READ(GTIIR)); 845 seq_printf(m, "Graphics Interrupt mask: %08x\n", 846 I915_READ(GTIMR)); 847 } 848 if (INTEL_GEN(dev_priv) >= 6) { 849 for_each_engine(engine, dev_priv, id) { 850 seq_printf(m, 851 "Graphics Interrupt mask (%s): %08x\n", 852 engine->name, I915_READ_IMR(engine)); 853 } 854 } 855 intel_runtime_pm_put(dev_priv); 856 857 return 0; 858 } 859 860 static int i915_gem_fence_regs_info(struct seq_file *m, void *data) 861 { 862 struct drm_i915_private *dev_priv = node_to_i915(m->private); 863 struct drm_device *dev = &dev_priv->drm; 864 int i, ret; 865 866 ret = mutex_lock_interruptible(&dev->struct_mutex); 867 if (ret) 868 return ret; 869 870 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 871 for (i = 0; i < dev_priv->num_fence_regs; i++) { 872 struct i915_vma *vma = dev_priv->fence_regs[i].vma; 873 874 seq_printf(m, "Fence %d, pin count = %d, object = ", 875 i, dev_priv->fence_regs[i].pin_count); 876 if (!vma) 877 seq_puts(m, "unused"); 878 else 879 describe_obj(m, vma->obj); 880 seq_putc(m, '\n'); 881 } 882 883 mutex_unlock(&dev->struct_mutex); 884 return 0; 885 } 886 887 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 888 static ssize_t gpu_state_read(struct file *file, char __user *ubuf, 889 size_t count, loff_t *pos) 890 { 891 struct i915_gpu_state *error = file->private_data; 892 struct drm_i915_error_state_buf str; 893 ssize_t ret; 894 loff_t tmp; 895 896 if (!error) 897 return 0; 898 899 ret = i915_error_state_buf_init(&str, error->i915, count, *pos); 900 if (ret) 901 return ret; 902 903 ret = i915_error_state_to_str(&str, error); 904 if (ret) 905 goto out; 906 907 tmp = 0; 908 ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes); 909 if (ret < 0) 910 goto out; 911 912 *pos = str.start + ret; 913 out: 914 i915_error_state_buf_release(&str); 915 return ret; 916 } 917 918 static int gpu_state_release(struct inode *inode, struct file *file) 919 { 920 i915_gpu_state_put(file->private_data); 921 return 0; 922 } 923 924 static int i915_gpu_info_open(struct inode *inode, struct file *file) 925 { 926 struct drm_i915_private *i915 = inode->i_private; 927 struct i915_gpu_state *gpu; 928 929 intel_runtime_pm_get(i915); 930 gpu = i915_capture_gpu_state(i915); 931 intel_runtime_pm_put(i915); 932 if (!gpu) 933 return -ENOMEM; 934 935 file->private_data = gpu; 936 return 0; 937 } 938 939 static const struct file_operations i915_gpu_info_fops = { 940 .owner = THIS_MODULE, 941 .open = i915_gpu_info_open, 942 .read = gpu_state_read, 943 .llseek = default_llseek, 944 .release = gpu_state_release, 945 }; 946 947 static ssize_t 948 i915_error_state_write(struct file *filp, 949 const char __user *ubuf, 950 size_t cnt, 951 loff_t *ppos) 952 { 953 struct i915_gpu_state *error = filp->private_data; 954 955 if (!error) 956 return 0; 957 958 DRM_DEBUG_DRIVER("Resetting error state\n"); 959 i915_reset_error_state(error->i915); 960 961 return cnt; 962 } 963 964 static int i915_error_state_open(struct inode *inode, struct file *file) 965 { 966 file->private_data = i915_first_error_state(inode->i_private); 967 return 0; 968 } 969 970 static const struct file_operations i915_error_state_fops = { 971 .owner = THIS_MODULE, 972 .open = i915_error_state_open, 973 .read = gpu_state_read, 974 .write = i915_error_state_write, 975 .llseek = default_llseek, 976 .release = gpu_state_release, 977 }; 978 #endif 979 980 static int 981 i915_next_seqno_set(void *data, u64 val) 982 { 983 struct drm_i915_private *dev_priv = data; 984 struct drm_device *dev = &dev_priv->drm; 985 int ret; 986 987 ret = mutex_lock_interruptible(&dev->struct_mutex); 988 if (ret) 989 return ret; 990 991 ret = i915_gem_set_global_seqno(dev, val); 992 mutex_unlock(&dev->struct_mutex); 993 994 return ret; 995 } 996 997 DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops, 998 NULL, i915_next_seqno_set, 999 "0x%llx\n"); 1000 1001 static int i915_frequency_info(struct seq_file *m, void *unused) 1002 { 1003 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1004 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1005 int ret = 0; 1006 1007 intel_runtime_pm_get(dev_priv); 1008 1009 if (IS_GEN5(dev_priv)) { 1010 u16 rgvswctl = I915_READ16(MEMSWCTL); 1011 u16 rgvstat = I915_READ16(MEMSTAT_ILK); 1012 1013 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 1014 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); 1015 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 1016 MEMSTAT_VID_SHIFT); 1017 seq_printf(m, "Current P-state: %d\n", 1018 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 1019 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1020 u32 rpmodectl, freq_sts; 1021 1022 mutex_lock(&dev_priv->pcu_lock); 1023 1024 rpmodectl = I915_READ(GEN6_RP_CONTROL); 1025 seq_printf(m, "Video Turbo Mode: %s\n", 1026 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); 1027 seq_printf(m, "HW control enabled: %s\n", 1028 yesno(rpmodectl & GEN6_RP_ENABLE)); 1029 seq_printf(m, "SW control enabled: %s\n", 1030 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == 1031 GEN6_RP_MEDIA_SW_MODE)); 1032 1033 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 1034 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 1035 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); 1036 1037 seq_printf(m, "actual GPU freq: %d MHz\n", 1038 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); 1039 1040 seq_printf(m, "current GPU freq: %d MHz\n", 1041 intel_gpu_freq(dev_priv, rps->cur_freq)); 1042 1043 seq_printf(m, "max GPU freq: %d MHz\n", 1044 intel_gpu_freq(dev_priv, rps->max_freq)); 1045 1046 seq_printf(m, "min GPU freq: %d MHz\n", 1047 intel_gpu_freq(dev_priv, rps->min_freq)); 1048 1049 seq_printf(m, "idle GPU freq: %d MHz\n", 1050 intel_gpu_freq(dev_priv, rps->idle_freq)); 1051 1052 seq_printf(m, 1053 "efficient (RPe) frequency: %d MHz\n", 1054 intel_gpu_freq(dev_priv, rps->efficient_freq)); 1055 mutex_unlock(&dev_priv->pcu_lock); 1056 } else if (INTEL_GEN(dev_priv) >= 6) { 1057 u32 rp_state_limits; 1058 u32 gt_perf_status; 1059 u32 rp_state_cap; 1060 u32 rpmodectl, rpinclimit, rpdeclimit; 1061 u32 rpstat, cagf, reqf; 1062 u32 rpupei, rpcurup, rpprevup; 1063 u32 rpdownei, rpcurdown, rpprevdown; 1064 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 1065 int max_freq; 1066 1067 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 1068 if (IS_GEN9_LP(dev_priv)) { 1069 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 1070 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS); 1071 } else { 1072 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 1073 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 1074 } 1075 1076 /* RPSTAT1 is in the GT power well */ 1077 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1078 1079 reqf = I915_READ(GEN6_RPNSWREQ); 1080 if (INTEL_GEN(dev_priv) >= 9) 1081 reqf >>= 23; 1082 else { 1083 reqf &= ~GEN6_TURBO_DISABLE; 1084 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1085 reqf >>= 24; 1086 else 1087 reqf >>= 25; 1088 } 1089 reqf = intel_gpu_freq(dev_priv, reqf); 1090 1091 rpmodectl = I915_READ(GEN6_RP_CONTROL); 1092 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); 1093 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); 1094 1095 rpstat = I915_READ(GEN6_RPSTAT1); 1096 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; 1097 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; 1098 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; 1099 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; 1100 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; 1101 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; 1102 cagf = intel_gpu_freq(dev_priv, 1103 intel_get_cagf(dev_priv, rpstat)); 1104 1105 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1106 1107 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) { 1108 pm_ier = I915_READ(GEN6_PMIER); 1109 pm_imr = I915_READ(GEN6_PMIMR); 1110 pm_isr = I915_READ(GEN6_PMISR); 1111 pm_iir = I915_READ(GEN6_PMIIR); 1112 pm_mask = I915_READ(GEN6_PMINTRMSK); 1113 } else { 1114 pm_ier = I915_READ(GEN8_GT_IER(2)); 1115 pm_imr = I915_READ(GEN8_GT_IMR(2)); 1116 pm_isr = I915_READ(GEN8_GT_ISR(2)); 1117 pm_iir = I915_READ(GEN8_GT_IIR(2)); 1118 pm_mask = I915_READ(GEN6_PMINTRMSK); 1119 } 1120 seq_printf(m, "Video Turbo Mode: %s\n", 1121 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); 1122 seq_printf(m, "HW control enabled: %s\n", 1123 yesno(rpmodectl & GEN6_RP_ENABLE)); 1124 seq_printf(m, "SW control enabled: %s\n", 1125 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == 1126 GEN6_RP_MEDIA_SW_MODE)); 1127 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1128 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); 1129 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n", 1130 rps->pm_intrmsk_mbz); 1131 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1132 seq_printf(m, "Render p-state ratio: %d\n", 1133 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8); 1134 seq_printf(m, "Render p-state VID: %d\n", 1135 gt_perf_status & 0xff); 1136 seq_printf(m, "Render p-state limit: %d\n", 1137 rp_state_limits & 0xff); 1138 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat); 1139 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl); 1140 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit); 1141 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 1142 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 1143 seq_printf(m, "CAGF: %dMHz\n", cagf); 1144 seq_printf(m, "RP CUR UP EI: %d (%dus)\n", 1145 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei)); 1146 seq_printf(m, "RP CUR UP: %d (%dus)\n", 1147 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup)); 1148 seq_printf(m, "RP PREV UP: %d (%dus)\n", 1149 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup)); 1150 seq_printf(m, "Up threshold: %d%%\n", rps->up_threshold); 1151 1152 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n", 1153 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei)); 1154 seq_printf(m, "RP CUR DOWN: %d (%dus)\n", 1155 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown)); 1156 seq_printf(m, "RP PREV DOWN: %d (%dus)\n", 1157 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown)); 1158 seq_printf(m, "Down threshold: %d%%\n", rps->down_threshold); 1159 1160 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 : 1161 rp_state_cap >> 16) & 0xff; 1162 max_freq *= (IS_GEN9_BC(dev_priv) || 1163 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1); 1164 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", 1165 intel_gpu_freq(dev_priv, max_freq)); 1166 1167 max_freq = (rp_state_cap & 0xff00) >> 8; 1168 max_freq *= (IS_GEN9_BC(dev_priv) || 1169 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1); 1170 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", 1171 intel_gpu_freq(dev_priv, max_freq)); 1172 1173 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 : 1174 rp_state_cap >> 0) & 0xff; 1175 max_freq *= (IS_GEN9_BC(dev_priv) || 1176 IS_CANNONLAKE(dev_priv) ? GEN9_FREQ_SCALER : 1); 1177 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 1178 intel_gpu_freq(dev_priv, max_freq)); 1179 seq_printf(m, "Max overclocked frequency: %dMHz\n", 1180 intel_gpu_freq(dev_priv, rps->max_freq)); 1181 1182 seq_printf(m, "Current freq: %d MHz\n", 1183 intel_gpu_freq(dev_priv, rps->cur_freq)); 1184 seq_printf(m, "Actual freq: %d MHz\n", cagf); 1185 seq_printf(m, "Idle freq: %d MHz\n", 1186 intel_gpu_freq(dev_priv, rps->idle_freq)); 1187 seq_printf(m, "Min freq: %d MHz\n", 1188 intel_gpu_freq(dev_priv, rps->min_freq)); 1189 seq_printf(m, "Boost freq: %d MHz\n", 1190 intel_gpu_freq(dev_priv, rps->boost_freq)); 1191 seq_printf(m, "Max freq: %d MHz\n", 1192 intel_gpu_freq(dev_priv, rps->max_freq)); 1193 seq_printf(m, 1194 "efficient (RPe) frequency: %d MHz\n", 1195 intel_gpu_freq(dev_priv, rps->efficient_freq)); 1196 } else { 1197 seq_puts(m, "no P-state info available\n"); 1198 } 1199 1200 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk); 1201 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); 1202 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); 1203 1204 intel_runtime_pm_put(dev_priv); 1205 return ret; 1206 } 1207 1208 static void i915_instdone_info(struct drm_i915_private *dev_priv, 1209 struct seq_file *m, 1210 struct intel_instdone *instdone) 1211 { 1212 int slice; 1213 int subslice; 1214 1215 seq_printf(m, "\t\tINSTDONE: 0x%08x\n", 1216 instdone->instdone); 1217 1218 if (INTEL_GEN(dev_priv) <= 3) 1219 return; 1220 1221 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n", 1222 instdone->slice_common); 1223 1224 if (INTEL_GEN(dev_priv) <= 6) 1225 return; 1226 1227 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 1228 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n", 1229 slice, subslice, instdone->sampler[slice][subslice]); 1230 1231 for_each_instdone_slice_subslice(dev_priv, slice, subslice) 1232 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n", 1233 slice, subslice, instdone->row[slice][subslice]); 1234 } 1235 1236 static int i915_hangcheck_info(struct seq_file *m, void *unused) 1237 { 1238 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1239 struct intel_engine_cs *engine; 1240 u64 acthd[I915_NUM_ENGINES]; 1241 u32 seqno[I915_NUM_ENGINES]; 1242 struct intel_instdone instdone; 1243 enum intel_engine_id id; 1244 1245 if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 1246 seq_puts(m, "Wedged\n"); 1247 if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) 1248 seq_puts(m, "Reset in progress: struct_mutex backoff\n"); 1249 if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags)) 1250 seq_puts(m, "Reset in progress: reset handoff to waiter\n"); 1251 if (waitqueue_active(&dev_priv->gpu_error.wait_queue)) 1252 seq_puts(m, "Waiter holding struct mutex\n"); 1253 if (waitqueue_active(&dev_priv->gpu_error.reset_queue)) 1254 seq_puts(m, "struct_mutex blocked for reset\n"); 1255 1256 if (!i915_modparams.enable_hangcheck) { 1257 seq_puts(m, "Hangcheck disabled\n"); 1258 return 0; 1259 } 1260 1261 intel_runtime_pm_get(dev_priv); 1262 1263 for_each_engine(engine, dev_priv, id) { 1264 acthd[id] = intel_engine_get_active_head(engine); 1265 seqno[id] = intel_engine_get_seqno(engine); 1266 } 1267 1268 intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); 1269 1270 intel_runtime_pm_put(dev_priv); 1271 1272 if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) 1273 seq_printf(m, "Hangcheck active, timer fires in %dms\n", 1274 jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires - 1275 jiffies)); 1276 else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) 1277 seq_puts(m, "Hangcheck active, work pending\n"); 1278 else 1279 seq_puts(m, "Hangcheck inactive\n"); 1280 1281 seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake)); 1282 1283 for_each_engine(engine, dev_priv, id) { 1284 struct intel_breadcrumbs *b = &engine->breadcrumbs; 1285 struct rb_node *rb; 1286 1287 seq_printf(m, "%s:\n", engine->name); 1288 seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n", 1289 engine->hangcheck.seqno, seqno[id], 1290 intel_engine_last_submit(engine), 1291 engine->timeline->inflight_seqnos); 1292 seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n", 1293 yesno(intel_engine_has_waiter(engine)), 1294 yesno(test_bit(engine->id, 1295 &dev_priv->gpu_error.missed_irq_rings)), 1296 yesno(engine->hangcheck.stalled)); 1297 1298 spin_lock_irq(&b->rb_lock); 1299 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { 1300 struct intel_wait *w = rb_entry(rb, typeof(*w), node); 1301 1302 seq_printf(m, "\t%s [%d] waiting for %x\n", 1303 w->tsk->comm, w->tsk->pid, w->seqno); 1304 } 1305 spin_unlock_irq(&b->rb_lock); 1306 1307 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", 1308 (long long)engine->hangcheck.acthd, 1309 (long long)acthd[id]); 1310 seq_printf(m, "\taction = %s(%d) %d ms ago\n", 1311 hangcheck_action_to_str(engine->hangcheck.action), 1312 engine->hangcheck.action, 1313 jiffies_to_msecs(jiffies - 1314 engine->hangcheck.action_timestamp)); 1315 1316 if (engine->id == RCS) { 1317 seq_puts(m, "\tinstdone read =\n"); 1318 1319 i915_instdone_info(dev_priv, m, &instdone); 1320 1321 seq_puts(m, "\tinstdone accu =\n"); 1322 1323 i915_instdone_info(dev_priv, m, 1324 &engine->hangcheck.instdone); 1325 } 1326 } 1327 1328 return 0; 1329 } 1330 1331 static int i915_reset_info(struct seq_file *m, void *unused) 1332 { 1333 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1334 struct i915_gpu_error *error = &dev_priv->gpu_error; 1335 struct intel_engine_cs *engine; 1336 enum intel_engine_id id; 1337 1338 seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error)); 1339 1340 for_each_engine(engine, dev_priv, id) { 1341 seq_printf(m, "%s = %u\n", engine->name, 1342 i915_reset_engine_count(error, engine)); 1343 } 1344 1345 return 0; 1346 } 1347 1348 static int ironlake_drpc_info(struct seq_file *m) 1349 { 1350 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1351 u32 rgvmodectl, rstdbyctl; 1352 u16 crstandvid; 1353 1354 rgvmodectl = I915_READ(MEMMODECTL); 1355 rstdbyctl = I915_READ(RSTDBYCTL); 1356 crstandvid = I915_READ16(CRSTANDVID); 1357 1358 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN)); 1359 seq_printf(m, "Boost freq: %d\n", 1360 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 1361 MEMMODE_BOOST_FREQ_SHIFT); 1362 seq_printf(m, "HW control enabled: %s\n", 1363 yesno(rgvmodectl & MEMMODE_HWIDLE_EN)); 1364 seq_printf(m, "SW control enabled: %s\n", 1365 yesno(rgvmodectl & MEMMODE_SWMODE_EN)); 1366 seq_printf(m, "Gated voltage change: %s\n", 1367 yesno(rgvmodectl & MEMMODE_RCLK_GATE)); 1368 seq_printf(m, "Starting frequency: P%d\n", 1369 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 1370 seq_printf(m, "Max P-state: P%d\n", 1371 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 1372 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 1373 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 1374 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 1375 seq_printf(m, "Render standby enabled: %s\n", 1376 yesno(!(rstdbyctl & RCX_SW_EXIT))); 1377 seq_puts(m, "Current RS state: "); 1378 switch (rstdbyctl & RSX_STATUS_MASK) { 1379 case RSX_STATUS_ON: 1380 seq_puts(m, "on\n"); 1381 break; 1382 case RSX_STATUS_RC1: 1383 seq_puts(m, "RC1\n"); 1384 break; 1385 case RSX_STATUS_RC1E: 1386 seq_puts(m, "RC1E\n"); 1387 break; 1388 case RSX_STATUS_RS1: 1389 seq_puts(m, "RS1\n"); 1390 break; 1391 case RSX_STATUS_RS2: 1392 seq_puts(m, "RS2 (RC6)\n"); 1393 break; 1394 case RSX_STATUS_RS3: 1395 seq_puts(m, "RC3 (RC6+)\n"); 1396 break; 1397 default: 1398 seq_puts(m, "unknown\n"); 1399 break; 1400 } 1401 1402 return 0; 1403 } 1404 1405 static int i915_forcewake_domains(struct seq_file *m, void *data) 1406 { 1407 struct drm_i915_private *i915 = node_to_i915(m->private); 1408 struct intel_uncore_forcewake_domain *fw_domain; 1409 unsigned int tmp; 1410 1411 seq_printf(m, "user.bypass_count = %u\n", 1412 i915->uncore.user_forcewake.count); 1413 1414 for_each_fw_domain(fw_domain, i915, tmp) 1415 seq_printf(m, "%s.wake_count = %u\n", 1416 intel_uncore_forcewake_domain_to_str(fw_domain->id), 1417 READ_ONCE(fw_domain->wake_count)); 1418 1419 return 0; 1420 } 1421 1422 static void print_rc6_res(struct seq_file *m, 1423 const char *title, 1424 const i915_reg_t reg) 1425 { 1426 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1427 1428 seq_printf(m, "%s %u (%llu us)\n", 1429 title, I915_READ(reg), 1430 intel_rc6_residency_us(dev_priv, reg)); 1431 } 1432 1433 static int vlv_drpc_info(struct seq_file *m) 1434 { 1435 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1436 u32 rcctl1, pw_status; 1437 1438 pw_status = I915_READ(VLV_GTLC_PW_STATUS); 1439 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1440 1441 seq_printf(m, "RC6 Enabled: %s\n", 1442 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | 1443 GEN6_RC_CTL_EI_MODE(1)))); 1444 seq_printf(m, "Render Power Well: %s\n", 1445 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); 1446 seq_printf(m, "Media Power Well: %s\n", 1447 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); 1448 1449 print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6); 1450 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6); 1451 1452 return i915_forcewake_domains(m, NULL); 1453 } 1454 1455 static int gen6_drpc_info(struct seq_file *m) 1456 { 1457 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1458 u32 gt_core_status, rcctl1, rc6vids = 0; 1459 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0; 1460 unsigned forcewake_count; 1461 int count = 0; 1462 1463 forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count); 1464 if (forcewake_count) { 1465 seq_puts(m, "RC information inaccurate because somebody " 1466 "holds a forcewake reference \n"); 1467 } else { 1468 /* NB: we cannot use forcewake, else we read the wrong values */ 1469 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1470 udelay(10); 1471 seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1472 } 1473 1474 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS); 1475 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); 1476 1477 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1478 if (INTEL_GEN(dev_priv) >= 9) { 1479 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE); 1480 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); 1481 } 1482 1483 mutex_lock(&dev_priv->pcu_lock); 1484 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1485 mutex_unlock(&dev_priv->pcu_lock); 1486 1487 seq_printf(m, "RC1e Enabled: %s\n", 1488 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 1489 seq_printf(m, "RC6 Enabled: %s\n", 1490 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 1491 if (INTEL_GEN(dev_priv) >= 9) { 1492 seq_printf(m, "Render Well Gating Enabled: %s\n", 1493 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE)); 1494 seq_printf(m, "Media Well Gating Enabled: %s\n", 1495 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE)); 1496 } 1497 seq_printf(m, "Deep RC6 Enabled: %s\n", 1498 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 1499 seq_printf(m, "Deepest RC6 Enabled: %s\n", 1500 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 1501 seq_puts(m, "Current RC state: "); 1502 switch (gt_core_status & GEN6_RCn_MASK) { 1503 case GEN6_RC0: 1504 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 1505 seq_puts(m, "Core Power Down\n"); 1506 else 1507 seq_puts(m, "on\n"); 1508 break; 1509 case GEN6_RC3: 1510 seq_puts(m, "RC3\n"); 1511 break; 1512 case GEN6_RC6: 1513 seq_puts(m, "RC6\n"); 1514 break; 1515 case GEN6_RC7: 1516 seq_puts(m, "RC7\n"); 1517 break; 1518 default: 1519 seq_puts(m, "Unknown\n"); 1520 break; 1521 } 1522 1523 seq_printf(m, "Core Power Down: %s\n", 1524 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1525 if (INTEL_GEN(dev_priv) >= 9) { 1526 seq_printf(m, "Render Power Well: %s\n", 1527 (gen9_powergate_status & 1528 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down"); 1529 seq_printf(m, "Media Power Well: %s\n", 1530 (gen9_powergate_status & 1531 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down"); 1532 } 1533 1534 /* Not exactly sure what this is */ 1535 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:", 1536 GEN6_GT_GFX_RC6_LOCKED); 1537 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6); 1538 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p); 1539 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp); 1540 1541 seq_printf(m, "RC6 voltage: %dmV\n", 1542 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1543 seq_printf(m, "RC6+ voltage: %dmV\n", 1544 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1545 seq_printf(m, "RC6++ voltage: %dmV\n", 1546 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1547 return i915_forcewake_domains(m, NULL); 1548 } 1549 1550 static int i915_drpc_info(struct seq_file *m, void *unused) 1551 { 1552 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1553 int err; 1554 1555 intel_runtime_pm_get(dev_priv); 1556 1557 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1558 err = vlv_drpc_info(m); 1559 else if (INTEL_GEN(dev_priv) >= 6) 1560 err = gen6_drpc_info(m); 1561 else 1562 err = ironlake_drpc_info(m); 1563 1564 intel_runtime_pm_put(dev_priv); 1565 1566 return err; 1567 } 1568 1569 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused) 1570 { 1571 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1572 1573 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 1574 dev_priv->fb_tracking.busy_bits); 1575 1576 seq_printf(m, "FB tracking flip bits: 0x%08x\n", 1577 dev_priv->fb_tracking.flip_bits); 1578 1579 return 0; 1580 } 1581 1582 static int i915_fbc_status(struct seq_file *m, void *unused) 1583 { 1584 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1585 struct intel_fbc *fbc = &dev_priv->fbc; 1586 1587 if (!HAS_FBC(dev_priv)) 1588 return -ENODEV; 1589 1590 intel_runtime_pm_get(dev_priv); 1591 mutex_lock(&fbc->lock); 1592 1593 if (intel_fbc_is_active(dev_priv)) 1594 seq_puts(m, "FBC enabled\n"); 1595 else 1596 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); 1597 1598 if (fbc->work.scheduled) 1599 seq_printf(m, "FBC worker scheduled on vblank %u, now %llu\n", 1600 fbc->work.scheduled_vblank, 1601 drm_crtc_vblank_count(&fbc->crtc->base)); 1602 1603 if (intel_fbc_is_active(dev_priv)) { 1604 u32 mask; 1605 1606 if (INTEL_GEN(dev_priv) >= 8) 1607 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK; 1608 else if (INTEL_GEN(dev_priv) >= 7) 1609 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; 1610 else if (INTEL_GEN(dev_priv) >= 5) 1611 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; 1612 else if (IS_G4X(dev_priv)) 1613 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK; 1614 else 1615 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING | 1616 FBC_STAT_COMPRESSED); 1617 1618 seq_printf(m, "Compressing: %s\n", yesno(mask)); 1619 } 1620 1621 mutex_unlock(&fbc->lock); 1622 intel_runtime_pm_put(dev_priv); 1623 1624 return 0; 1625 } 1626 1627 static int i915_fbc_false_color_get(void *data, u64 *val) 1628 { 1629 struct drm_i915_private *dev_priv = data; 1630 1631 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 1632 return -ENODEV; 1633 1634 *val = dev_priv->fbc.false_color; 1635 1636 return 0; 1637 } 1638 1639 static int i915_fbc_false_color_set(void *data, u64 val) 1640 { 1641 struct drm_i915_private *dev_priv = data; 1642 u32 reg; 1643 1644 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv)) 1645 return -ENODEV; 1646 1647 mutex_lock(&dev_priv->fbc.lock); 1648 1649 reg = I915_READ(ILK_DPFC_CONTROL); 1650 dev_priv->fbc.false_color = val; 1651 1652 I915_WRITE(ILK_DPFC_CONTROL, val ? 1653 (reg | FBC_CTL_FALSE_COLOR) : 1654 (reg & ~FBC_CTL_FALSE_COLOR)); 1655 1656 mutex_unlock(&dev_priv->fbc.lock); 1657 return 0; 1658 } 1659 1660 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, 1661 i915_fbc_false_color_get, i915_fbc_false_color_set, 1662 "%llu\n"); 1663 1664 static int i915_ips_status(struct seq_file *m, void *unused) 1665 { 1666 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1667 1668 if (!HAS_IPS(dev_priv)) 1669 return -ENODEV; 1670 1671 intel_runtime_pm_get(dev_priv); 1672 1673 seq_printf(m, "Enabled by kernel parameter: %s\n", 1674 yesno(i915_modparams.enable_ips)); 1675 1676 if (INTEL_GEN(dev_priv) >= 8) { 1677 seq_puts(m, "Currently: unknown\n"); 1678 } else { 1679 if (I915_READ(IPS_CTL) & IPS_ENABLE) 1680 seq_puts(m, "Currently: enabled\n"); 1681 else 1682 seq_puts(m, "Currently: disabled\n"); 1683 } 1684 1685 intel_runtime_pm_put(dev_priv); 1686 1687 return 0; 1688 } 1689 1690 static int i915_sr_status(struct seq_file *m, void *unused) 1691 { 1692 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1693 bool sr_enabled = false; 1694 1695 intel_runtime_pm_get(dev_priv); 1696 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); 1697 1698 if (INTEL_GEN(dev_priv) >= 9) 1699 /* no global SR status; inspect per-plane WM */; 1700 else if (HAS_PCH_SPLIT(dev_priv)) 1701 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 1702 else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) || 1703 IS_I945G(dev_priv) || IS_I945GM(dev_priv)) 1704 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 1705 else if (IS_I915GM(dev_priv)) 1706 sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; 1707 else if (IS_PINEVIEW(dev_priv)) 1708 sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; 1709 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1710 sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; 1711 1712 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 1713 intel_runtime_pm_put(dev_priv); 1714 1715 seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled)); 1716 1717 return 0; 1718 } 1719 1720 static int i915_emon_status(struct seq_file *m, void *unused) 1721 { 1722 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1723 struct drm_device *dev = &dev_priv->drm; 1724 unsigned long temp, chipset, gfx; 1725 int ret; 1726 1727 if (!IS_GEN5(dev_priv)) 1728 return -ENODEV; 1729 1730 ret = mutex_lock_interruptible(&dev->struct_mutex); 1731 if (ret) 1732 return ret; 1733 1734 temp = i915_mch_val(dev_priv); 1735 chipset = i915_chipset_val(dev_priv); 1736 gfx = i915_gfx_val(dev_priv); 1737 mutex_unlock(&dev->struct_mutex); 1738 1739 seq_printf(m, "GMCH temp: %ld\n", temp); 1740 seq_printf(m, "Chipset power: %ld\n", chipset); 1741 seq_printf(m, "GFX power: %ld\n", gfx); 1742 seq_printf(m, "Total power: %ld\n", chipset + gfx); 1743 1744 return 0; 1745 } 1746 1747 static int i915_ring_freq_table(struct seq_file *m, void *unused) 1748 { 1749 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1750 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1751 int ret = 0; 1752 int gpu_freq, ia_freq; 1753 unsigned int max_gpu_freq, min_gpu_freq; 1754 1755 if (!HAS_LLC(dev_priv)) 1756 return -ENODEV; 1757 1758 intel_runtime_pm_get(dev_priv); 1759 1760 ret = mutex_lock_interruptible(&dev_priv->pcu_lock); 1761 if (ret) 1762 goto out; 1763 1764 if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) { 1765 /* Convert GT frequency to 50 HZ units */ 1766 min_gpu_freq = rps->min_freq_softlimit / GEN9_FREQ_SCALER; 1767 max_gpu_freq = rps->max_freq_softlimit / GEN9_FREQ_SCALER; 1768 } else { 1769 min_gpu_freq = rps->min_freq_softlimit; 1770 max_gpu_freq = rps->max_freq_softlimit; 1771 } 1772 1773 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 1774 1775 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 1776 ia_freq = gpu_freq; 1777 sandybridge_pcode_read(dev_priv, 1778 GEN6_PCODE_READ_MIN_FREQ_TABLE, 1779 &ia_freq); 1780 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 1781 intel_gpu_freq(dev_priv, (gpu_freq * 1782 (IS_GEN9_BC(dev_priv) || 1783 IS_CANNONLAKE(dev_priv) ? 1784 GEN9_FREQ_SCALER : 1))), 1785 ((ia_freq >> 0) & 0xff) * 100, 1786 ((ia_freq >> 8) & 0xff) * 100); 1787 } 1788 1789 mutex_unlock(&dev_priv->pcu_lock); 1790 1791 out: 1792 intel_runtime_pm_put(dev_priv); 1793 return ret; 1794 } 1795 1796 static int i915_opregion(struct seq_file *m, void *unused) 1797 { 1798 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1799 struct drm_device *dev = &dev_priv->drm; 1800 struct intel_opregion *opregion = &dev_priv->opregion; 1801 int ret; 1802 1803 ret = mutex_lock_interruptible(&dev->struct_mutex); 1804 if (ret) 1805 goto out; 1806 1807 if (opregion->header) 1808 seq_write(m, opregion->header, OPREGION_SIZE); 1809 1810 mutex_unlock(&dev->struct_mutex); 1811 1812 out: 1813 return 0; 1814 } 1815 1816 static int i915_vbt(struct seq_file *m, void *unused) 1817 { 1818 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; 1819 1820 if (opregion->vbt) 1821 seq_write(m, opregion->vbt, opregion->vbt_size); 1822 1823 return 0; 1824 } 1825 1826 static int i915_gem_framebuffer_info(struct seq_file *m, void *data) 1827 { 1828 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1829 struct drm_device *dev = &dev_priv->drm; 1830 struct intel_framebuffer *fbdev_fb = NULL; 1831 struct drm_framebuffer *drm_fb; 1832 int ret; 1833 1834 ret = mutex_lock_interruptible(&dev->struct_mutex); 1835 if (ret) 1836 return ret; 1837 1838 #ifdef CONFIG_DRM_FBDEV_EMULATION 1839 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) { 1840 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb); 1841 1842 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1843 fbdev_fb->base.width, 1844 fbdev_fb->base.height, 1845 fbdev_fb->base.format->depth, 1846 fbdev_fb->base.format->cpp[0] * 8, 1847 fbdev_fb->base.modifier, 1848 drm_framebuffer_read_refcount(&fbdev_fb->base)); 1849 describe_obj(m, fbdev_fb->obj); 1850 seq_putc(m, '\n'); 1851 } 1852 #endif 1853 1854 mutex_lock(&dev->mode_config.fb_lock); 1855 drm_for_each_fb(drm_fb, dev) { 1856 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); 1857 if (fb == fbdev_fb) 1858 continue; 1859 1860 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1861 fb->base.width, 1862 fb->base.height, 1863 fb->base.format->depth, 1864 fb->base.format->cpp[0] * 8, 1865 fb->base.modifier, 1866 drm_framebuffer_read_refcount(&fb->base)); 1867 describe_obj(m, fb->obj); 1868 seq_putc(m, '\n'); 1869 } 1870 mutex_unlock(&dev->mode_config.fb_lock); 1871 mutex_unlock(&dev->struct_mutex); 1872 1873 return 0; 1874 } 1875 1876 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) 1877 { 1878 seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)", 1879 ring->space, ring->head, ring->tail); 1880 } 1881 1882 static int i915_context_status(struct seq_file *m, void *unused) 1883 { 1884 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1885 struct drm_device *dev = &dev_priv->drm; 1886 struct intel_engine_cs *engine; 1887 struct i915_gem_context *ctx; 1888 enum intel_engine_id id; 1889 int ret; 1890 1891 ret = mutex_lock_interruptible(&dev->struct_mutex); 1892 if (ret) 1893 return ret; 1894 1895 list_for_each_entry(ctx, &dev_priv->contexts.list, link) { 1896 seq_printf(m, "HW context %u ", ctx->hw_id); 1897 if (ctx->pid) { 1898 struct task_struct *task; 1899 1900 task = get_pid_task(ctx->pid, PIDTYPE_PID); 1901 if (task) { 1902 seq_printf(m, "(%s [%d]) ", 1903 task->comm, task->pid); 1904 put_task_struct(task); 1905 } 1906 } else if (IS_ERR(ctx->file_priv)) { 1907 seq_puts(m, "(deleted) "); 1908 } else { 1909 seq_puts(m, "(kernel) "); 1910 } 1911 1912 seq_putc(m, ctx->remap_slice ? 'R' : 'r'); 1913 seq_putc(m, '\n'); 1914 1915 for_each_engine(engine, dev_priv, id) { 1916 struct intel_context *ce = &ctx->engine[engine->id]; 1917 1918 seq_printf(m, "%s: ", engine->name); 1919 if (ce->state) 1920 describe_obj(m, ce->state->obj); 1921 if (ce->ring) 1922 describe_ctx_ring(m, ce->ring); 1923 seq_putc(m, '\n'); 1924 } 1925 1926 seq_putc(m, '\n'); 1927 } 1928 1929 mutex_unlock(&dev->struct_mutex); 1930 1931 return 0; 1932 } 1933 1934 static const char *swizzle_string(unsigned swizzle) 1935 { 1936 switch (swizzle) { 1937 case I915_BIT_6_SWIZZLE_NONE: 1938 return "none"; 1939 case I915_BIT_6_SWIZZLE_9: 1940 return "bit9"; 1941 case I915_BIT_6_SWIZZLE_9_10: 1942 return "bit9/bit10"; 1943 case I915_BIT_6_SWIZZLE_9_11: 1944 return "bit9/bit11"; 1945 case I915_BIT_6_SWIZZLE_9_10_11: 1946 return "bit9/bit10/bit11"; 1947 case I915_BIT_6_SWIZZLE_9_17: 1948 return "bit9/bit17"; 1949 case I915_BIT_6_SWIZZLE_9_10_17: 1950 return "bit9/bit10/bit17"; 1951 case I915_BIT_6_SWIZZLE_UNKNOWN: 1952 return "unknown"; 1953 } 1954 1955 return "bug"; 1956 } 1957 1958 static int i915_swizzle_info(struct seq_file *m, void *data) 1959 { 1960 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1961 1962 intel_runtime_pm_get(dev_priv); 1963 1964 seq_printf(m, "bit6 swizzle for X-tiling = %s\n", 1965 swizzle_string(dev_priv->mm.bit_6_swizzle_x)); 1966 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", 1967 swizzle_string(dev_priv->mm.bit_6_swizzle_y)); 1968 1969 if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) { 1970 seq_printf(m, "DDC = 0x%08x\n", 1971 I915_READ(DCC)); 1972 seq_printf(m, "DDC2 = 0x%08x\n", 1973 I915_READ(DCC2)); 1974 seq_printf(m, "C0DRB3 = 0x%04x\n", 1975 I915_READ16(C0DRB3)); 1976 seq_printf(m, "C1DRB3 = 0x%04x\n", 1977 I915_READ16(C1DRB3)); 1978 } else if (INTEL_GEN(dev_priv) >= 6) { 1979 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n", 1980 I915_READ(MAD_DIMM_C0)); 1981 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n", 1982 I915_READ(MAD_DIMM_C1)); 1983 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n", 1984 I915_READ(MAD_DIMM_C2)); 1985 seq_printf(m, "TILECTL = 0x%08x\n", 1986 I915_READ(TILECTL)); 1987 if (INTEL_GEN(dev_priv) >= 8) 1988 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 1989 I915_READ(GAMTARBMODE)); 1990 else 1991 seq_printf(m, "ARB_MODE = 0x%08x\n", 1992 I915_READ(ARB_MODE)); 1993 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 1994 I915_READ(DISP_ARB_CTL)); 1995 } 1996 1997 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 1998 seq_puts(m, "L-shaped memory detected\n"); 1999 2000 intel_runtime_pm_put(dev_priv); 2001 2002 return 0; 2003 } 2004 2005 static int per_file_ctx(int id, void *ptr, void *data) 2006 { 2007 struct i915_gem_context *ctx = ptr; 2008 struct seq_file *m = data; 2009 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2010 2011 if (!ppgtt) { 2012 seq_printf(m, " no ppgtt for context %d\n", 2013 ctx->user_handle); 2014 return 0; 2015 } 2016 2017 if (i915_gem_context_is_default(ctx)) 2018 seq_puts(m, " default context:\n"); 2019 else 2020 seq_printf(m, " context %d:\n", ctx->user_handle); 2021 ppgtt->debug_dump(ppgtt, m); 2022 2023 return 0; 2024 } 2025 2026 static void gen8_ppgtt_info(struct seq_file *m, 2027 struct drm_i915_private *dev_priv) 2028 { 2029 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2030 struct intel_engine_cs *engine; 2031 enum intel_engine_id id; 2032 int i; 2033 2034 if (!ppgtt) 2035 return; 2036 2037 for_each_engine(engine, dev_priv, id) { 2038 seq_printf(m, "%s\n", engine->name); 2039 for (i = 0; i < 4; i++) { 2040 u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i)); 2041 pdp <<= 32; 2042 pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i)); 2043 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2044 } 2045 } 2046 } 2047 2048 static void gen6_ppgtt_info(struct seq_file *m, 2049 struct drm_i915_private *dev_priv) 2050 { 2051 struct intel_engine_cs *engine; 2052 enum intel_engine_id id; 2053 2054 if (IS_GEN6(dev_priv)) 2055 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2056 2057 for_each_engine(engine, dev_priv, id) { 2058 seq_printf(m, "%s\n", engine->name); 2059 if (IS_GEN7(dev_priv)) 2060 seq_printf(m, "GFX_MODE: 0x%08x\n", 2061 I915_READ(RING_MODE_GEN7(engine))); 2062 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", 2063 I915_READ(RING_PP_DIR_BASE(engine))); 2064 seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", 2065 I915_READ(RING_PP_DIR_BASE_READ(engine))); 2066 seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", 2067 I915_READ(RING_PP_DIR_DCLV(engine))); 2068 } 2069 if (dev_priv->mm.aliasing_ppgtt) { 2070 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2071 2072 seq_puts(m, "aliasing PPGTT:\n"); 2073 seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset); 2074 2075 ppgtt->debug_dump(ppgtt, m); 2076 } 2077 2078 seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); 2079 } 2080 2081 static int i915_ppgtt_info(struct seq_file *m, void *data) 2082 { 2083 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2084 struct drm_device *dev = &dev_priv->drm; 2085 struct drm_file *file; 2086 int ret; 2087 2088 mutex_lock(&dev->filelist_mutex); 2089 ret = mutex_lock_interruptible(&dev->struct_mutex); 2090 if (ret) 2091 goto out_unlock; 2092 2093 intel_runtime_pm_get(dev_priv); 2094 2095 if (INTEL_GEN(dev_priv) >= 8) 2096 gen8_ppgtt_info(m, dev_priv); 2097 else if (INTEL_GEN(dev_priv) >= 6) 2098 gen6_ppgtt_info(m, dev_priv); 2099 2100 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2101 struct drm_i915_file_private *file_priv = file->driver_priv; 2102 struct task_struct *task; 2103 2104 task = get_pid_task(file->pid, PIDTYPE_PID); 2105 if (!task) { 2106 ret = -ESRCH; 2107 goto out_rpm; 2108 } 2109 seq_printf(m, "\nproc: %s\n", task->comm); 2110 put_task_struct(task); 2111 idr_for_each(&file_priv->context_idr, per_file_ctx, 2112 (void *)(unsigned long)m); 2113 } 2114 2115 out_rpm: 2116 intel_runtime_pm_put(dev_priv); 2117 mutex_unlock(&dev->struct_mutex); 2118 out_unlock: 2119 mutex_unlock(&dev->filelist_mutex); 2120 return ret; 2121 } 2122 2123 static int count_irq_waiters(struct drm_i915_private *i915) 2124 { 2125 struct intel_engine_cs *engine; 2126 enum intel_engine_id id; 2127 int count = 0; 2128 2129 for_each_engine(engine, i915, id) 2130 count += intel_engine_has_waiter(engine); 2131 2132 return count; 2133 } 2134 2135 static const char *rps_power_to_str(unsigned int power) 2136 { 2137 static const char * const strings[] = { 2138 [LOW_POWER] = "low power", 2139 [BETWEEN] = "mixed", 2140 [HIGH_POWER] = "high power", 2141 }; 2142 2143 if (power >= ARRAY_SIZE(strings) || !strings[power]) 2144 return "unknown"; 2145 2146 return strings[power]; 2147 } 2148 2149 static int i915_rps_boost_info(struct seq_file *m, void *data) 2150 { 2151 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2152 struct drm_device *dev = &dev_priv->drm; 2153 struct intel_rps *rps = &dev_priv->gt_pm.rps; 2154 struct drm_file *file; 2155 2156 seq_printf(m, "RPS enabled? %d\n", rps->enabled); 2157 seq_printf(m, "GPU busy? %s [%d requests]\n", 2158 yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); 2159 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2160 seq_printf(m, "Boosts outstanding? %d\n", 2161 atomic_read(&rps->num_waiters)); 2162 seq_printf(m, "Frequency requested %d\n", 2163 intel_gpu_freq(dev_priv, rps->cur_freq)); 2164 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2165 intel_gpu_freq(dev_priv, rps->min_freq), 2166 intel_gpu_freq(dev_priv, rps->min_freq_softlimit), 2167 intel_gpu_freq(dev_priv, rps->max_freq_softlimit), 2168 intel_gpu_freq(dev_priv, rps->max_freq)); 2169 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", 2170 intel_gpu_freq(dev_priv, rps->idle_freq), 2171 intel_gpu_freq(dev_priv, rps->efficient_freq), 2172 intel_gpu_freq(dev_priv, rps->boost_freq)); 2173 2174 mutex_lock(&dev->filelist_mutex); 2175 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 2176 struct drm_i915_file_private *file_priv = file->driver_priv; 2177 struct task_struct *task; 2178 2179 rcu_read_lock(); 2180 task = pid_task(file->pid, PIDTYPE_PID); 2181 seq_printf(m, "%s [%d]: %d boosts\n", 2182 task ? task->comm : "<unknown>", 2183 task ? task->pid : -1, 2184 atomic_read(&file_priv->rps_client.boosts)); 2185 rcu_read_unlock(); 2186 } 2187 seq_printf(m, "Kernel (anonymous) boosts: %d\n", 2188 atomic_read(&rps->boosts)); 2189 mutex_unlock(&dev->filelist_mutex); 2190 2191 if (INTEL_GEN(dev_priv) >= 6 && 2192 rps->enabled && 2193 dev_priv->gt.active_requests) { 2194 u32 rpup, rpupei; 2195 u32 rpdown, rpdownei; 2196 2197 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 2198 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; 2199 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; 2200 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; 2201 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; 2202 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 2203 2204 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", 2205 rps_power_to_str(rps->power)); 2206 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", 2207 rpup && rpupei ? 100 * rpup / rpupei : 0, 2208 rps->up_threshold); 2209 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", 2210 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0, 2211 rps->down_threshold); 2212 } else { 2213 seq_puts(m, "\nRPS Autotuning inactive\n"); 2214 } 2215 2216 return 0; 2217 } 2218 2219 static int i915_llc(struct seq_file *m, void *data) 2220 { 2221 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2222 const bool edram = INTEL_GEN(dev_priv) > 8; 2223 2224 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv))); 2225 seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC", 2226 intel_uncore_edram_size(dev_priv)/1024/1024); 2227 2228 return 0; 2229 } 2230 2231 static int i915_huc_load_status_info(struct seq_file *m, void *data) 2232 { 2233 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2234 struct drm_printer p; 2235 2236 if (!HAS_HUC(dev_priv)) 2237 return -ENODEV; 2238 2239 p = drm_seq_file_printer(m); 2240 intel_uc_fw_dump(&dev_priv->huc.fw, &p); 2241 2242 intel_runtime_pm_get(dev_priv); 2243 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2)); 2244 intel_runtime_pm_put(dev_priv); 2245 2246 return 0; 2247 } 2248 2249 static int i915_guc_load_status_info(struct seq_file *m, void *data) 2250 { 2251 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2252 struct drm_printer p; 2253 u32 tmp, i; 2254 2255 if (!HAS_GUC(dev_priv)) 2256 return -ENODEV; 2257 2258 p = drm_seq_file_printer(m); 2259 intel_uc_fw_dump(&dev_priv->guc.fw, &p); 2260 2261 intel_runtime_pm_get(dev_priv); 2262 2263 tmp = I915_READ(GUC_STATUS); 2264 2265 seq_printf(m, "\nGuC status 0x%08x:\n", tmp); 2266 seq_printf(m, "\tBootrom status = 0x%x\n", 2267 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT); 2268 seq_printf(m, "\tuKernel status = 0x%x\n", 2269 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT); 2270 seq_printf(m, "\tMIA Core status = 0x%x\n", 2271 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT); 2272 seq_puts(m, "\nScratch registers:\n"); 2273 for (i = 0; i < 16; i++) 2274 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i))); 2275 2276 intel_runtime_pm_put(dev_priv); 2277 2278 return 0; 2279 } 2280 2281 static void i915_guc_log_info(struct seq_file *m, 2282 struct drm_i915_private *dev_priv) 2283 { 2284 struct intel_guc *guc = &dev_priv->guc; 2285 2286 seq_puts(m, "\nGuC logging stats:\n"); 2287 2288 seq_printf(m, "\tISR: flush count %10u, overflow count %10u\n", 2289 guc->log.flush_count[GUC_ISR_LOG_BUFFER], 2290 guc->log.total_overflow_count[GUC_ISR_LOG_BUFFER]); 2291 2292 seq_printf(m, "\tDPC: flush count %10u, overflow count %10u\n", 2293 guc->log.flush_count[GUC_DPC_LOG_BUFFER], 2294 guc->log.total_overflow_count[GUC_DPC_LOG_BUFFER]); 2295 2296 seq_printf(m, "\tCRASH: flush count %10u, overflow count %10u\n", 2297 guc->log.flush_count[GUC_CRASH_DUMP_LOG_BUFFER], 2298 guc->log.total_overflow_count[GUC_CRASH_DUMP_LOG_BUFFER]); 2299 2300 seq_printf(m, "\tTotal flush interrupt count: %u\n", 2301 guc->log.flush_interrupt_count); 2302 2303 seq_printf(m, "\tCapture miss count: %u\n", 2304 guc->log.capture_miss_count); 2305 } 2306 2307 static void i915_guc_client_info(struct seq_file *m, 2308 struct drm_i915_private *dev_priv, 2309 struct intel_guc_client *client) 2310 { 2311 struct intel_engine_cs *engine; 2312 enum intel_engine_id id; 2313 uint64_t tot = 0; 2314 2315 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n", 2316 client->priority, client->stage_id, client->proc_desc_offset); 2317 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n", 2318 client->doorbell_id, client->doorbell_offset); 2319 2320 for_each_engine(engine, dev_priv, id) { 2321 u64 submissions = client->submissions[id]; 2322 tot += submissions; 2323 seq_printf(m, "\tSubmissions: %llu %s\n", 2324 submissions, engine->name); 2325 } 2326 seq_printf(m, "\tTotal: %llu\n", tot); 2327 } 2328 2329 static int i915_guc_info(struct seq_file *m, void *data) 2330 { 2331 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2332 const struct intel_guc *guc = &dev_priv->guc; 2333 2334 if (!USES_GUC_SUBMISSION(dev_priv)) 2335 return -ENODEV; 2336 2337 GEM_BUG_ON(!guc->execbuf_client); 2338 GEM_BUG_ON(!guc->preempt_client); 2339 2340 seq_printf(m, "Doorbell map:\n"); 2341 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap); 2342 seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline); 2343 2344 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client); 2345 i915_guc_client_info(m, dev_priv, guc->execbuf_client); 2346 seq_printf(m, "\nGuC preempt client @ %p:\n", guc->preempt_client); 2347 i915_guc_client_info(m, dev_priv, guc->preempt_client); 2348 2349 i915_guc_log_info(m, dev_priv); 2350 2351 /* Add more as required ... */ 2352 2353 return 0; 2354 } 2355 2356 static int i915_guc_stage_pool(struct seq_file *m, void *data) 2357 { 2358 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2359 const struct intel_guc *guc = &dev_priv->guc; 2360 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr; 2361 struct intel_guc_client *client = guc->execbuf_client; 2362 unsigned int tmp; 2363 int index; 2364 2365 if (!USES_GUC_SUBMISSION(dev_priv)) 2366 return -ENODEV; 2367 2368 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) { 2369 struct intel_engine_cs *engine; 2370 2371 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE)) 2372 continue; 2373 2374 seq_printf(m, "GuC stage descriptor %u:\n", index); 2375 seq_printf(m, "\tIndex: %u\n", desc->stage_id); 2376 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute); 2377 seq_printf(m, "\tPriority: %d\n", desc->priority); 2378 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id); 2379 seq_printf(m, "\tEngines used: 0x%x\n", 2380 desc->engines_used); 2381 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n", 2382 desc->db_trigger_phy, 2383 desc->db_trigger_cpu, 2384 desc->db_trigger_uk); 2385 seq_printf(m, "\tProcess descriptor: 0x%x\n", 2386 desc->process_desc); 2387 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n", 2388 desc->wq_addr, desc->wq_size); 2389 seq_putc(m, '\n'); 2390 2391 for_each_engine_masked(engine, dev_priv, client->engines, tmp) { 2392 u32 guc_engine_id = engine->guc_id; 2393 struct guc_execlist_context *lrc = 2394 &desc->lrc[guc_engine_id]; 2395 2396 seq_printf(m, "\t%s LRC:\n", engine->name); 2397 seq_printf(m, "\t\tContext desc: 0x%x\n", 2398 lrc->context_desc); 2399 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id); 2400 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca); 2401 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin); 2402 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end); 2403 seq_putc(m, '\n'); 2404 } 2405 } 2406 2407 return 0; 2408 } 2409 2410 static int i915_guc_log_dump(struct seq_file *m, void *data) 2411 { 2412 struct drm_info_node *node = m->private; 2413 struct drm_i915_private *dev_priv = node_to_i915(node); 2414 bool dump_load_err = !!node->info_ent->data; 2415 struct drm_i915_gem_object *obj = NULL; 2416 u32 *log; 2417 int i = 0; 2418 2419 if (!HAS_GUC(dev_priv)) 2420 return -ENODEV; 2421 2422 if (dump_load_err) 2423 obj = dev_priv->guc.load_err_log; 2424 else if (dev_priv->guc.log.vma) 2425 obj = dev_priv->guc.log.vma->obj; 2426 2427 if (!obj) 2428 return 0; 2429 2430 log = i915_gem_object_pin_map(obj, I915_MAP_WC); 2431 if (IS_ERR(log)) { 2432 DRM_DEBUG("Failed to pin object\n"); 2433 seq_puts(m, "(log data unaccessible)\n"); 2434 return PTR_ERR(log); 2435 } 2436 2437 for (i = 0; i < obj->base.size / sizeof(u32); i += 4) 2438 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n", 2439 *(log + i), *(log + i + 1), 2440 *(log + i + 2), *(log + i + 3)); 2441 2442 seq_putc(m, '\n'); 2443 2444 i915_gem_object_unpin_map(obj); 2445 2446 return 0; 2447 } 2448 2449 static int i915_guc_log_control_get(void *data, u64 *val) 2450 { 2451 struct drm_i915_private *dev_priv = data; 2452 2453 if (!HAS_GUC(dev_priv)) 2454 return -ENODEV; 2455 2456 if (!dev_priv->guc.log.vma) 2457 return -EINVAL; 2458 2459 *val = i915_modparams.guc_log_level; 2460 2461 return 0; 2462 } 2463 2464 static int i915_guc_log_control_set(void *data, u64 val) 2465 { 2466 struct drm_i915_private *dev_priv = data; 2467 int ret; 2468 2469 if (!HAS_GUC(dev_priv)) 2470 return -ENODEV; 2471 2472 if (!dev_priv->guc.log.vma) 2473 return -EINVAL; 2474 2475 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 2476 if (ret) 2477 return ret; 2478 2479 intel_runtime_pm_get(dev_priv); 2480 ret = i915_guc_log_control(dev_priv, val); 2481 intel_runtime_pm_put(dev_priv); 2482 2483 mutex_unlock(&dev_priv->drm.struct_mutex); 2484 return ret; 2485 } 2486 2487 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops, 2488 i915_guc_log_control_get, i915_guc_log_control_set, 2489 "%lld\n"); 2490 2491 static const char *psr2_live_status(u32 val) 2492 { 2493 static const char * const live_status[] = { 2494 "IDLE", 2495 "CAPTURE", 2496 "CAPTURE_FS", 2497 "SLEEP", 2498 "BUFON_FW", 2499 "ML_UP", 2500 "SU_STANDBY", 2501 "FAST_SLEEP", 2502 "DEEP_SLEEP", 2503 "BUF_ON", 2504 "TG_ON" 2505 }; 2506 2507 val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; 2508 if (val < ARRAY_SIZE(live_status)) 2509 return live_status[val]; 2510 2511 return "unknown"; 2512 } 2513 2514 static int i915_edp_psr_status(struct seq_file *m, void *data) 2515 { 2516 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2517 u32 psrperf = 0; 2518 u32 stat[3]; 2519 enum pipe pipe; 2520 bool enabled = false; 2521 2522 if (!HAS_PSR(dev_priv)) 2523 return -ENODEV; 2524 2525 intel_runtime_pm_get(dev_priv); 2526 2527 mutex_lock(&dev_priv->psr.lock); 2528 seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support)); 2529 seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok)); 2530 seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled)); 2531 seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active)); 2532 seq_printf(m, "Busy frontbuffer bits: 0x%03x\n", 2533 dev_priv->psr.busy_frontbuffer_bits); 2534 seq_printf(m, "Re-enable work scheduled: %s\n", 2535 yesno(work_busy(&dev_priv->psr.work.work))); 2536 2537 if (HAS_DDI(dev_priv)) { 2538 if (dev_priv->psr.psr2_support) 2539 enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE; 2540 else 2541 enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; 2542 } else { 2543 for_each_pipe(dev_priv, pipe) { 2544 enum transcoder cpu_transcoder = 2545 intel_pipe_to_cpu_transcoder(dev_priv, pipe); 2546 enum intel_display_power_domain power_domain; 2547 2548 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 2549 if (!intel_display_power_get_if_enabled(dev_priv, 2550 power_domain)) 2551 continue; 2552 2553 stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & 2554 VLV_EDP_PSR_CURR_STATE_MASK; 2555 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2556 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2557 enabled = true; 2558 2559 intel_display_power_put(dev_priv, power_domain); 2560 } 2561 } 2562 2563 seq_printf(m, "Main link in standby mode: %s\n", 2564 yesno(dev_priv->psr.link_standby)); 2565 2566 seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); 2567 2568 if (!HAS_DDI(dev_priv)) 2569 for_each_pipe(dev_priv, pipe) { 2570 if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || 2571 (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) 2572 seq_printf(m, " pipe %c", pipe_name(pipe)); 2573 } 2574 seq_puts(m, "\n"); 2575 2576 /* 2577 * VLV/CHV PSR has no kind of performance counter 2578 * SKL+ Perf counter is reset to 0 everytime DC state is entered 2579 */ 2580 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2581 psrperf = I915_READ(EDP_PSR_PERF_CNT) & 2582 EDP_PSR_PERF_CNT_MASK; 2583 2584 seq_printf(m, "Performance_Counter: %u\n", psrperf); 2585 } 2586 if (dev_priv->psr.psr2_support) { 2587 u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL); 2588 2589 seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n", 2590 psr2, psr2_live_status(psr2)); 2591 } 2592 mutex_unlock(&dev_priv->psr.lock); 2593 2594 intel_runtime_pm_put(dev_priv); 2595 return 0; 2596 } 2597 2598 static int i915_sink_crc(struct seq_file *m, void *data) 2599 { 2600 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2601 struct drm_device *dev = &dev_priv->drm; 2602 struct intel_connector *connector; 2603 struct drm_connector_list_iter conn_iter; 2604 struct intel_dp *intel_dp = NULL; 2605 struct drm_modeset_acquire_ctx ctx; 2606 int ret; 2607 u8 crc[6]; 2608 2609 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); 2610 2611 drm_connector_list_iter_begin(dev, &conn_iter); 2612 2613 for_each_intel_connector_iter(connector, &conn_iter) { 2614 struct drm_crtc *crtc; 2615 struct drm_connector_state *state; 2616 struct intel_crtc_state *crtc_state; 2617 2618 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP) 2619 continue; 2620 2621 retry: 2622 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); 2623 if (ret) 2624 goto err; 2625 2626 state = connector->base.state; 2627 if (!state->best_encoder) 2628 continue; 2629 2630 crtc = state->crtc; 2631 ret = drm_modeset_lock(&crtc->mutex, &ctx); 2632 if (ret) 2633 goto err; 2634 2635 crtc_state = to_intel_crtc_state(crtc->state); 2636 if (!crtc_state->base.active) 2637 continue; 2638 2639 /* 2640 * We need to wait for all crtc updates to complete, to make 2641 * sure any pending modesets and plane updates are completed. 2642 */ 2643 if (crtc_state->base.commit) { 2644 ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done); 2645 2646 if (ret) 2647 goto err; 2648 } 2649 2650 intel_dp = enc_to_intel_dp(state->best_encoder); 2651 2652 ret = intel_dp_sink_crc(intel_dp, crtc_state, crc); 2653 if (ret) 2654 goto err; 2655 2656 seq_printf(m, "%02x%02x%02x%02x%02x%02x\n", 2657 crc[0], crc[1], crc[2], 2658 crc[3], crc[4], crc[5]); 2659 goto out; 2660 2661 err: 2662 if (ret == -EDEADLK) { 2663 ret = drm_modeset_backoff(&ctx); 2664 if (!ret) 2665 goto retry; 2666 } 2667 goto out; 2668 } 2669 ret = -ENODEV; 2670 out: 2671 drm_connector_list_iter_end(&conn_iter); 2672 drm_modeset_drop_locks(&ctx); 2673 drm_modeset_acquire_fini(&ctx); 2674 2675 return ret; 2676 } 2677 2678 static int i915_energy_uJ(struct seq_file *m, void *data) 2679 { 2680 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2681 unsigned long long power; 2682 u32 units; 2683 2684 if (INTEL_GEN(dev_priv) < 6) 2685 return -ENODEV; 2686 2687 intel_runtime_pm_get(dev_priv); 2688 2689 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) { 2690 intel_runtime_pm_put(dev_priv); 2691 return -ENODEV; 2692 } 2693 2694 units = (power & 0x1f00) >> 8; 2695 power = I915_READ(MCH_SECP_NRG_STTS); 2696 power = (1000000 * power) >> units; /* convert to uJ */ 2697 2698 intel_runtime_pm_put(dev_priv); 2699 2700 seq_printf(m, "%llu", power); 2701 2702 return 0; 2703 } 2704 2705 static int i915_runtime_pm_status(struct seq_file *m, void *unused) 2706 { 2707 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2708 struct pci_dev *pdev = dev_priv->drm.pdev; 2709 2710 if (!HAS_RUNTIME_PM(dev_priv)) 2711 seq_puts(m, "Runtime power management not supported\n"); 2712 2713 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); 2714 seq_printf(m, "IRQs disabled: %s\n", 2715 yesno(!intel_irqs_enabled(dev_priv))); 2716 #ifdef CONFIG_PM 2717 seq_printf(m, "Usage count: %d\n", 2718 atomic_read(&dev_priv->drm.dev->power.usage_count)); 2719 #else 2720 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 2721 #endif 2722 seq_printf(m, "PCI device power state: %s [%d]\n", 2723 pci_power_name(pdev->current_state), 2724 pdev->current_state); 2725 2726 return 0; 2727 } 2728 2729 static int i915_power_domain_info(struct seq_file *m, void *unused) 2730 { 2731 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2732 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2733 int i; 2734 2735 mutex_lock(&power_domains->lock); 2736 2737 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); 2738 for (i = 0; i < power_domains->power_well_count; i++) { 2739 struct i915_power_well *power_well; 2740 enum intel_display_power_domain power_domain; 2741 2742 power_well = &power_domains->power_wells[i]; 2743 seq_printf(m, "%-25s %d\n", power_well->name, 2744 power_well->count); 2745 2746 for_each_power_domain(power_domain, power_well->domains) 2747 seq_printf(m, " %-23s %d\n", 2748 intel_display_power_domain_str(power_domain), 2749 power_domains->domain_use_count[power_domain]); 2750 } 2751 2752 mutex_unlock(&power_domains->lock); 2753 2754 return 0; 2755 } 2756 2757 static int i915_dmc_info(struct seq_file *m, void *unused) 2758 { 2759 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2760 struct intel_csr *csr; 2761 2762 if (!HAS_CSR(dev_priv)) 2763 return -ENODEV; 2764 2765 csr = &dev_priv->csr; 2766 2767 intel_runtime_pm_get(dev_priv); 2768 2769 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); 2770 seq_printf(m, "path: %s\n", csr->fw_path); 2771 2772 if (!csr->dmc_payload) 2773 goto out; 2774 2775 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), 2776 CSR_VERSION_MINOR(csr->version)); 2777 2778 if (IS_KABYLAKE(dev_priv) || 2779 (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) { 2780 seq_printf(m, "DC3 -> DC5 count: %d\n", 2781 I915_READ(SKL_CSR_DC3_DC5_COUNT)); 2782 seq_printf(m, "DC5 -> DC6 count: %d\n", 2783 I915_READ(SKL_CSR_DC5_DC6_COUNT)); 2784 } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) { 2785 seq_printf(m, "DC3 -> DC5 count: %d\n", 2786 I915_READ(BXT_CSR_DC3_DC5_COUNT)); 2787 } 2788 2789 out: 2790 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); 2791 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); 2792 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); 2793 2794 intel_runtime_pm_put(dev_priv); 2795 2796 return 0; 2797 } 2798 2799 static void intel_seq_print_mode(struct seq_file *m, int tabs, 2800 struct drm_display_mode *mode) 2801 { 2802 int i; 2803 2804 for (i = 0; i < tabs; i++) 2805 seq_putc(m, '\t'); 2806 2807 seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n", 2808 mode->base.id, mode->name, 2809 mode->vrefresh, mode->clock, 2810 mode->hdisplay, mode->hsync_start, 2811 mode->hsync_end, mode->htotal, 2812 mode->vdisplay, mode->vsync_start, 2813 mode->vsync_end, mode->vtotal, 2814 mode->type, mode->flags); 2815 } 2816 2817 static void intel_encoder_info(struct seq_file *m, 2818 struct intel_crtc *intel_crtc, 2819 struct intel_encoder *intel_encoder) 2820 { 2821 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2822 struct drm_device *dev = &dev_priv->drm; 2823 struct drm_crtc *crtc = &intel_crtc->base; 2824 struct intel_connector *intel_connector; 2825 struct drm_encoder *encoder; 2826 2827 encoder = &intel_encoder->base; 2828 seq_printf(m, "\tencoder %d: type: %s, connectors:\n", 2829 encoder->base.id, encoder->name); 2830 for_each_connector_on_encoder(dev, encoder, intel_connector) { 2831 struct drm_connector *connector = &intel_connector->base; 2832 seq_printf(m, "\t\tconnector %d: type: %s, status: %s", 2833 connector->base.id, 2834 connector->name, 2835 drm_get_connector_status_name(connector->status)); 2836 if (connector->status == connector_status_connected) { 2837 struct drm_display_mode *mode = &crtc->mode; 2838 seq_printf(m, ", mode:\n"); 2839 intel_seq_print_mode(m, 2, mode); 2840 } else { 2841 seq_putc(m, '\n'); 2842 } 2843 } 2844 } 2845 2846 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) 2847 { 2848 struct drm_i915_private *dev_priv = node_to_i915(m->private); 2849 struct drm_device *dev = &dev_priv->drm; 2850 struct drm_crtc *crtc = &intel_crtc->base; 2851 struct intel_encoder *intel_encoder; 2852 struct drm_plane_state *plane_state = crtc->primary->state; 2853 struct drm_framebuffer *fb = plane_state->fb; 2854 2855 if (fb) 2856 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", 2857 fb->base.id, plane_state->src_x >> 16, 2858 plane_state->src_y >> 16, fb->width, fb->height); 2859 else 2860 seq_puts(m, "\tprimary plane disabled\n"); 2861 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 2862 intel_encoder_info(m, intel_crtc, intel_encoder); 2863 } 2864 2865 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel) 2866 { 2867 struct drm_display_mode *mode = panel->fixed_mode; 2868 2869 seq_printf(m, "\tfixed mode:\n"); 2870 intel_seq_print_mode(m, 2, mode); 2871 } 2872 2873 static void intel_dp_info(struct seq_file *m, 2874 struct intel_connector *intel_connector) 2875 { 2876 struct intel_encoder *intel_encoder = intel_connector->encoder; 2877 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 2878 2879 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2880 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 2881 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) 2882 intel_panel_info(m, &intel_connector->panel); 2883 2884 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports, 2885 &intel_dp->aux); 2886 } 2887 2888 static void intel_dp_mst_info(struct seq_file *m, 2889 struct intel_connector *intel_connector) 2890 { 2891 struct intel_encoder *intel_encoder = intel_connector->encoder; 2892 struct intel_dp_mst_encoder *intel_mst = 2893 enc_to_mst(&intel_encoder->base); 2894 struct intel_digital_port *intel_dig_port = intel_mst->primary; 2895 struct intel_dp *intel_dp = &intel_dig_port->dp; 2896 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, 2897 intel_connector->port); 2898 2899 seq_printf(m, "\taudio support: %s\n", yesno(has_audio)); 2900 } 2901 2902 static void intel_hdmi_info(struct seq_file *m, 2903 struct intel_connector *intel_connector) 2904 { 2905 struct intel_encoder *intel_encoder = intel_connector->encoder; 2906 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base); 2907 2908 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio)); 2909 } 2910 2911 static void intel_lvds_info(struct seq_file *m, 2912 struct intel_connector *intel_connector) 2913 { 2914 intel_panel_info(m, &intel_connector->panel); 2915 } 2916 2917 static void intel_connector_info(struct seq_file *m, 2918 struct drm_connector *connector) 2919 { 2920 struct intel_connector *intel_connector = to_intel_connector(connector); 2921 struct intel_encoder *intel_encoder = intel_connector->encoder; 2922 struct drm_display_mode *mode; 2923 2924 seq_printf(m, "connector %d: type %s, status: %s\n", 2925 connector->base.id, connector->name, 2926 drm_get_connector_status_name(connector->status)); 2927 if (connector->status == connector_status_connected) { 2928 seq_printf(m, "\tname: %s\n", connector->display_info.name); 2929 seq_printf(m, "\tphysical dimensions: %dx%dmm\n", 2930 connector->display_info.width_mm, 2931 connector->display_info.height_mm); 2932 seq_printf(m, "\tsubpixel order: %s\n", 2933 drm_get_subpixel_order_name(connector->display_info.subpixel_order)); 2934 seq_printf(m, "\tCEA rev: %d\n", 2935 connector->display_info.cea_rev); 2936 } 2937 2938 if (!intel_encoder) 2939 return; 2940 2941 switch (connector->connector_type) { 2942 case DRM_MODE_CONNECTOR_DisplayPort: 2943 case DRM_MODE_CONNECTOR_eDP: 2944 if (intel_encoder->type == INTEL_OUTPUT_DP_MST) 2945 intel_dp_mst_info(m, intel_connector); 2946 else 2947 intel_dp_info(m, intel_connector); 2948 break; 2949 case DRM_MODE_CONNECTOR_LVDS: 2950 if (intel_encoder->type == INTEL_OUTPUT_LVDS) 2951 intel_lvds_info(m, intel_connector); 2952 break; 2953 case DRM_MODE_CONNECTOR_HDMIA: 2954 if (intel_encoder->type == INTEL_OUTPUT_HDMI || 2955 intel_encoder->type == INTEL_OUTPUT_DDI) 2956 intel_hdmi_info(m, intel_connector); 2957 break; 2958 default: 2959 break; 2960 } 2961 2962 seq_printf(m, "\tmodes:\n"); 2963 list_for_each_entry(mode, &connector->modes, head) 2964 intel_seq_print_mode(m, 2, mode); 2965 } 2966 2967 static const char *plane_type(enum drm_plane_type type) 2968 { 2969 switch (type) { 2970 case DRM_PLANE_TYPE_OVERLAY: 2971 return "OVL"; 2972 case DRM_PLANE_TYPE_PRIMARY: 2973 return "PRI"; 2974 case DRM_PLANE_TYPE_CURSOR: 2975 return "CUR"; 2976 /* 2977 * Deliberately omitting default: to generate compiler warnings 2978 * when a new drm_plane_type gets added. 2979 */ 2980 } 2981 2982 return "unknown"; 2983 } 2984 2985 static const char *plane_rotation(unsigned int rotation) 2986 { 2987 static char buf[48]; 2988 /* 2989 * According to doc only one DRM_MODE_ROTATE_ is allowed but this 2990 * will print them all to visualize if the values are misused 2991 */ 2992 snprintf(buf, sizeof(buf), 2993 "%s%s%s%s%s%s(0x%08x)", 2994 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "", 2995 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "", 2996 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "", 2997 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "", 2998 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "", 2999 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "", 3000 rotation); 3001 3002 return buf; 3003 } 3004 3005 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3006 { 3007 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3008 struct drm_device *dev = &dev_priv->drm; 3009 struct intel_plane *intel_plane; 3010 3011 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3012 struct drm_plane_state *state; 3013 struct drm_plane *plane = &intel_plane->base; 3014 struct drm_format_name_buf format_name; 3015 3016 if (!plane->state) { 3017 seq_puts(m, "plane->state is NULL!\n"); 3018 continue; 3019 } 3020 3021 state = plane->state; 3022 3023 if (state->fb) { 3024 drm_get_format_name(state->fb->format->format, 3025 &format_name); 3026 } else { 3027 sprintf(format_name.str, "N/A"); 3028 } 3029 3030 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n", 3031 plane->base.id, 3032 plane_type(intel_plane->base.type), 3033 state->crtc_x, state->crtc_y, 3034 state->crtc_w, state->crtc_h, 3035 (state->src_x >> 16), 3036 ((state->src_x & 0xffff) * 15625) >> 10, 3037 (state->src_y >> 16), 3038 ((state->src_y & 0xffff) * 15625) >> 10, 3039 (state->src_w >> 16), 3040 ((state->src_w & 0xffff) * 15625) >> 10, 3041 (state->src_h >> 16), 3042 ((state->src_h & 0xffff) * 15625) >> 10, 3043 format_name.str, 3044 plane_rotation(state->rotation)); 3045 } 3046 } 3047 3048 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc) 3049 { 3050 struct intel_crtc_state *pipe_config; 3051 int num_scalers = intel_crtc->num_scalers; 3052 int i; 3053 3054 pipe_config = to_intel_crtc_state(intel_crtc->base.state); 3055 3056 /* Not all platformas have a scaler */ 3057 if (num_scalers) { 3058 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d", 3059 num_scalers, 3060 pipe_config->scaler_state.scaler_users, 3061 pipe_config->scaler_state.scaler_id); 3062 3063 for (i = 0; i < num_scalers; i++) { 3064 struct intel_scaler *sc = 3065 &pipe_config->scaler_state.scalers[i]; 3066 3067 seq_printf(m, ", scalers[%d]: use=%s, mode=%x", 3068 i, yesno(sc->in_use), sc->mode); 3069 } 3070 seq_puts(m, "\n"); 3071 } else { 3072 seq_puts(m, "\tNo scalers available on this platform\n"); 3073 } 3074 } 3075 3076 static int i915_display_info(struct seq_file *m, void *unused) 3077 { 3078 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3079 struct drm_device *dev = &dev_priv->drm; 3080 struct intel_crtc *crtc; 3081 struct drm_connector *connector; 3082 struct drm_connector_list_iter conn_iter; 3083 3084 intel_runtime_pm_get(dev_priv); 3085 seq_printf(m, "CRTC info\n"); 3086 seq_printf(m, "---------\n"); 3087 for_each_intel_crtc(dev, crtc) { 3088 struct intel_crtc_state *pipe_config; 3089 3090 drm_modeset_lock(&crtc->base.mutex, NULL); 3091 pipe_config = to_intel_crtc_state(crtc->base.state); 3092 3093 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n", 3094 crtc->base.base.id, pipe_name(crtc->pipe), 3095 yesno(pipe_config->base.active), 3096 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 3097 yesno(pipe_config->dither), pipe_config->pipe_bpp); 3098 3099 if (pipe_config->base.active) { 3100 struct intel_plane *cursor = 3101 to_intel_plane(crtc->base.cursor); 3102 3103 intel_crtc_info(m, crtc); 3104 3105 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n", 3106 yesno(cursor->base.state->visible), 3107 cursor->base.state->crtc_x, 3108 cursor->base.state->crtc_y, 3109 cursor->base.state->crtc_w, 3110 cursor->base.state->crtc_h, 3111 cursor->cursor.base); 3112 intel_scaler_info(m, crtc); 3113 intel_plane_info(m, crtc); 3114 } 3115 3116 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n", 3117 yesno(!crtc->cpu_fifo_underrun_disabled), 3118 yesno(!crtc->pch_fifo_underrun_disabled)); 3119 drm_modeset_unlock(&crtc->base.mutex); 3120 } 3121 3122 seq_printf(m, "\n"); 3123 seq_printf(m, "Connector info\n"); 3124 seq_printf(m, "--------------\n"); 3125 mutex_lock(&dev->mode_config.mutex); 3126 drm_connector_list_iter_begin(dev, &conn_iter); 3127 drm_for_each_connector_iter(connector, &conn_iter) 3128 intel_connector_info(m, connector); 3129 drm_connector_list_iter_end(&conn_iter); 3130 mutex_unlock(&dev->mode_config.mutex); 3131 3132 intel_runtime_pm_put(dev_priv); 3133 3134 return 0; 3135 } 3136 3137 static int i915_engine_info(struct seq_file *m, void *unused) 3138 { 3139 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3140 struct intel_engine_cs *engine; 3141 enum intel_engine_id id; 3142 struct drm_printer p; 3143 3144 intel_runtime_pm_get(dev_priv); 3145 3146 seq_printf(m, "GT awake? %s\n", 3147 yesno(dev_priv->gt.awake)); 3148 seq_printf(m, "Global active requests: %d\n", 3149 dev_priv->gt.active_requests); 3150 seq_printf(m, "CS timestamp frequency: %u kHz\n", 3151 dev_priv->info.cs_timestamp_frequency_khz); 3152 3153 p = drm_seq_file_printer(m); 3154 for_each_engine(engine, dev_priv, id) 3155 intel_engine_dump(engine, &p, "%s\n", engine->name); 3156 3157 intel_runtime_pm_put(dev_priv); 3158 3159 return 0; 3160 } 3161 3162 static int i915_shrinker_info(struct seq_file *m, void *unused) 3163 { 3164 struct drm_i915_private *i915 = node_to_i915(m->private); 3165 3166 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks); 3167 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch); 3168 3169 return 0; 3170 } 3171 3172 static int i915_shared_dplls_info(struct seq_file *m, void *unused) 3173 { 3174 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3175 struct drm_device *dev = &dev_priv->drm; 3176 int i; 3177 3178 drm_modeset_lock_all(dev); 3179 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3180 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 3181 3182 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 3183 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", 3184 pll->state.crtc_mask, pll->active_mask, yesno(pll->on)); 3185 seq_printf(m, " tracked hardware state:\n"); 3186 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll); 3187 seq_printf(m, " dpll_md: 0x%08x\n", 3188 pll->state.hw_state.dpll_md); 3189 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0); 3190 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1); 3191 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll); 3192 } 3193 drm_modeset_unlock_all(dev); 3194 3195 return 0; 3196 } 3197 3198 static int i915_wa_registers(struct seq_file *m, void *unused) 3199 { 3200 int i; 3201 int ret; 3202 struct intel_engine_cs *engine; 3203 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3204 struct drm_device *dev = &dev_priv->drm; 3205 struct i915_workarounds *workarounds = &dev_priv->workarounds; 3206 enum intel_engine_id id; 3207 3208 ret = mutex_lock_interruptible(&dev->struct_mutex); 3209 if (ret) 3210 return ret; 3211 3212 intel_runtime_pm_get(dev_priv); 3213 3214 seq_printf(m, "Workarounds applied: %d\n", workarounds->count); 3215 for_each_engine(engine, dev_priv, id) 3216 seq_printf(m, "HW whitelist count for %s: %d\n", 3217 engine->name, workarounds->hw_whitelist_count[id]); 3218 for (i = 0; i < workarounds->count; ++i) { 3219 i915_reg_t addr; 3220 u32 mask, value, read; 3221 bool ok; 3222 3223 addr = workarounds->reg[i].addr; 3224 mask = workarounds->reg[i].mask; 3225 value = workarounds->reg[i].value; 3226 read = I915_READ(addr); 3227 ok = (value & mask) == (read & mask); 3228 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", 3229 i915_mmio_reg_offset(addr), value, mask, read, ok ? "OK" : "FAIL"); 3230 } 3231 3232 intel_runtime_pm_put(dev_priv); 3233 mutex_unlock(&dev->struct_mutex); 3234 3235 return 0; 3236 } 3237 3238 static int i915_ipc_status_show(struct seq_file *m, void *data) 3239 { 3240 struct drm_i915_private *dev_priv = m->private; 3241 3242 seq_printf(m, "Isochronous Priority Control: %s\n", 3243 yesno(dev_priv->ipc_enabled)); 3244 return 0; 3245 } 3246 3247 static int i915_ipc_status_open(struct inode *inode, struct file *file) 3248 { 3249 struct drm_i915_private *dev_priv = inode->i_private; 3250 3251 if (!HAS_IPC(dev_priv)) 3252 return -ENODEV; 3253 3254 return single_open(file, i915_ipc_status_show, dev_priv); 3255 } 3256 3257 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf, 3258 size_t len, loff_t *offp) 3259 { 3260 struct seq_file *m = file->private_data; 3261 struct drm_i915_private *dev_priv = m->private; 3262 int ret; 3263 bool enable; 3264 3265 ret = kstrtobool_from_user(ubuf, len, &enable); 3266 if (ret < 0) 3267 return ret; 3268 3269 intel_runtime_pm_get(dev_priv); 3270 if (!dev_priv->ipc_enabled && enable) 3271 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n"); 3272 dev_priv->wm.distrust_bios_wm = true; 3273 dev_priv->ipc_enabled = enable; 3274 intel_enable_ipc(dev_priv); 3275 intel_runtime_pm_put(dev_priv); 3276 3277 return len; 3278 } 3279 3280 static const struct file_operations i915_ipc_status_fops = { 3281 .owner = THIS_MODULE, 3282 .open = i915_ipc_status_open, 3283 .read = seq_read, 3284 .llseek = seq_lseek, 3285 .release = single_release, 3286 .write = i915_ipc_status_write 3287 }; 3288 3289 static int i915_ddb_info(struct seq_file *m, void *unused) 3290 { 3291 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3292 struct drm_device *dev = &dev_priv->drm; 3293 struct skl_ddb_allocation *ddb; 3294 struct skl_ddb_entry *entry; 3295 enum pipe pipe; 3296 int plane; 3297 3298 if (INTEL_GEN(dev_priv) < 9) 3299 return -ENODEV; 3300 3301 drm_modeset_lock_all(dev); 3302 3303 ddb = &dev_priv->wm.skl_hw.ddb; 3304 3305 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size"); 3306 3307 for_each_pipe(dev_priv, pipe) { 3308 seq_printf(m, "Pipe %c\n", pipe_name(pipe)); 3309 3310 for_each_universal_plane(dev_priv, pipe, plane) { 3311 entry = &ddb->plane[pipe][plane]; 3312 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, 3313 entry->start, entry->end, 3314 skl_ddb_entry_size(entry)); 3315 } 3316 3317 entry = &ddb->plane[pipe][PLANE_CURSOR]; 3318 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3319 entry->end, skl_ddb_entry_size(entry)); 3320 } 3321 3322 drm_modeset_unlock_all(dev); 3323 3324 return 0; 3325 } 3326 3327 static void drrs_status_per_crtc(struct seq_file *m, 3328 struct drm_device *dev, 3329 struct intel_crtc *intel_crtc) 3330 { 3331 struct drm_i915_private *dev_priv = to_i915(dev); 3332 struct i915_drrs *drrs = &dev_priv->drrs; 3333 int vrefresh = 0; 3334 struct drm_connector *connector; 3335 struct drm_connector_list_iter conn_iter; 3336 3337 drm_connector_list_iter_begin(dev, &conn_iter); 3338 drm_for_each_connector_iter(connector, &conn_iter) { 3339 if (connector->state->crtc != &intel_crtc->base) 3340 continue; 3341 3342 seq_printf(m, "%s:\n", connector->name); 3343 } 3344 drm_connector_list_iter_end(&conn_iter); 3345 3346 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) 3347 seq_puts(m, "\tVBT: DRRS_type: Static"); 3348 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) 3349 seq_puts(m, "\tVBT: DRRS_type: Seamless"); 3350 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) 3351 seq_puts(m, "\tVBT: DRRS_type: None"); 3352 else 3353 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); 3354 3355 seq_puts(m, "\n\n"); 3356 3357 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) { 3358 struct intel_panel *panel; 3359 3360 mutex_lock(&drrs->mutex); 3361 /* DRRS Supported */ 3362 seq_puts(m, "\tDRRS Supported: Yes\n"); 3363 3364 /* disable_drrs() will make drrs->dp NULL */ 3365 if (!drrs->dp) { 3366 seq_puts(m, "Idleness DRRS: Disabled"); 3367 mutex_unlock(&drrs->mutex); 3368 return; 3369 } 3370 3371 panel = &drrs->dp->attached_connector->panel; 3372 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", 3373 drrs->busy_frontbuffer_bits); 3374 3375 seq_puts(m, "\n\t\t"); 3376 if (drrs->refresh_rate_type == DRRS_HIGH_RR) { 3377 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); 3378 vrefresh = panel->fixed_mode->vrefresh; 3379 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { 3380 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); 3381 vrefresh = panel->downclock_mode->vrefresh; 3382 } else { 3383 seq_printf(m, "DRRS_State: Unknown(%d)\n", 3384 drrs->refresh_rate_type); 3385 mutex_unlock(&drrs->mutex); 3386 return; 3387 } 3388 seq_printf(m, "\t\tVrefresh: %d", vrefresh); 3389 3390 seq_puts(m, "\n\t\t"); 3391 mutex_unlock(&drrs->mutex); 3392 } else { 3393 /* DRRS not supported. Print the VBT parameter*/ 3394 seq_puts(m, "\tDRRS Supported : No"); 3395 } 3396 seq_puts(m, "\n"); 3397 } 3398 3399 static int i915_drrs_status(struct seq_file *m, void *unused) 3400 { 3401 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3402 struct drm_device *dev = &dev_priv->drm; 3403 struct intel_crtc *intel_crtc; 3404 int active_crtc_cnt = 0; 3405 3406 drm_modeset_lock_all(dev); 3407 for_each_intel_crtc(dev, intel_crtc) { 3408 if (intel_crtc->base.state->active) { 3409 active_crtc_cnt++; 3410 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 3411 3412 drrs_status_per_crtc(m, dev, intel_crtc); 3413 } 3414 } 3415 drm_modeset_unlock_all(dev); 3416 3417 if (!active_crtc_cnt) 3418 seq_puts(m, "No active crtc found\n"); 3419 3420 return 0; 3421 } 3422 3423 static int i915_dp_mst_info(struct seq_file *m, void *unused) 3424 { 3425 struct drm_i915_private *dev_priv = node_to_i915(m->private); 3426 struct drm_device *dev = &dev_priv->drm; 3427 struct intel_encoder *intel_encoder; 3428 struct intel_digital_port *intel_dig_port; 3429 struct drm_connector *connector; 3430 struct drm_connector_list_iter conn_iter; 3431 3432 drm_connector_list_iter_begin(dev, &conn_iter); 3433 drm_for_each_connector_iter(connector, &conn_iter) { 3434 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) 3435 continue; 3436 3437 intel_encoder = intel_attached_encoder(connector); 3438 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) 3439 continue; 3440 3441 intel_dig_port = enc_to_dig_port(&intel_encoder->base); 3442 if (!intel_dig_port->dp.can_mst) 3443 continue; 3444 3445 seq_printf(m, "MST Source Port %c\n", 3446 port_name(intel_dig_port->base.port)); 3447 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 3448 } 3449 drm_connector_list_iter_end(&conn_iter); 3450 3451 return 0; 3452 } 3453 3454 static ssize_t i915_displayport_test_active_write(struct file *file, 3455 const char __user *ubuf, 3456 size_t len, loff_t *offp) 3457 { 3458 char *input_buffer; 3459 int status = 0; 3460 struct drm_device *dev; 3461 struct drm_connector *connector; 3462 struct drm_connector_list_iter conn_iter; 3463 struct intel_dp *intel_dp; 3464 int val = 0; 3465 3466 dev = ((struct seq_file *)file->private_data)->private; 3467 3468 if (len == 0) 3469 return 0; 3470 3471 input_buffer = memdup_user_nul(ubuf, len); 3472 if (IS_ERR(input_buffer)) 3473 return PTR_ERR(input_buffer); 3474 3475 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len); 3476 3477 drm_connector_list_iter_begin(dev, &conn_iter); 3478 drm_for_each_connector_iter(connector, &conn_iter) { 3479 struct intel_encoder *encoder; 3480 3481 if (connector->connector_type != 3482 DRM_MODE_CONNECTOR_DisplayPort) 3483 continue; 3484 3485 encoder = to_intel_encoder(connector->encoder); 3486 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3487 continue; 3488 3489 if (encoder && connector->status == connector_status_connected) { 3490 intel_dp = enc_to_intel_dp(&encoder->base); 3491 status = kstrtoint(input_buffer, 10, &val); 3492 if (status < 0) 3493 break; 3494 DRM_DEBUG_DRIVER("Got %d for test active\n", val); 3495 /* To prevent erroneous activation of the compliance 3496 * testing code, only accept an actual value of 1 here 3497 */ 3498 if (val == 1) 3499 intel_dp->compliance.test_active = 1; 3500 else 3501 intel_dp->compliance.test_active = 0; 3502 } 3503 } 3504 drm_connector_list_iter_end(&conn_iter); 3505 kfree(input_buffer); 3506 if (status < 0) 3507 return status; 3508 3509 *offp += len; 3510 return len; 3511 } 3512 3513 static int i915_displayport_test_active_show(struct seq_file *m, void *data) 3514 { 3515 struct drm_device *dev = m->private; 3516 struct drm_connector *connector; 3517 struct drm_connector_list_iter conn_iter; 3518 struct intel_dp *intel_dp; 3519 3520 drm_connector_list_iter_begin(dev, &conn_iter); 3521 drm_for_each_connector_iter(connector, &conn_iter) { 3522 struct intel_encoder *encoder; 3523 3524 if (connector->connector_type != 3525 DRM_MODE_CONNECTOR_DisplayPort) 3526 continue; 3527 3528 encoder = to_intel_encoder(connector->encoder); 3529 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3530 continue; 3531 3532 if (encoder && connector->status == connector_status_connected) { 3533 intel_dp = enc_to_intel_dp(&encoder->base); 3534 if (intel_dp->compliance.test_active) 3535 seq_puts(m, "1"); 3536 else 3537 seq_puts(m, "0"); 3538 } else 3539 seq_puts(m, "0"); 3540 } 3541 drm_connector_list_iter_end(&conn_iter); 3542 3543 return 0; 3544 } 3545 3546 static int i915_displayport_test_active_open(struct inode *inode, 3547 struct file *file) 3548 { 3549 struct drm_i915_private *dev_priv = inode->i_private; 3550 3551 return single_open(file, i915_displayport_test_active_show, 3552 &dev_priv->drm); 3553 } 3554 3555 static const struct file_operations i915_displayport_test_active_fops = { 3556 .owner = THIS_MODULE, 3557 .open = i915_displayport_test_active_open, 3558 .read = seq_read, 3559 .llseek = seq_lseek, 3560 .release = single_release, 3561 .write = i915_displayport_test_active_write 3562 }; 3563 3564 static int i915_displayport_test_data_show(struct seq_file *m, void *data) 3565 { 3566 struct drm_device *dev = m->private; 3567 struct drm_connector *connector; 3568 struct drm_connector_list_iter conn_iter; 3569 struct intel_dp *intel_dp; 3570 3571 drm_connector_list_iter_begin(dev, &conn_iter); 3572 drm_for_each_connector_iter(connector, &conn_iter) { 3573 struct intel_encoder *encoder; 3574 3575 if (connector->connector_type != 3576 DRM_MODE_CONNECTOR_DisplayPort) 3577 continue; 3578 3579 encoder = to_intel_encoder(connector->encoder); 3580 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3581 continue; 3582 3583 if (encoder && connector->status == connector_status_connected) { 3584 intel_dp = enc_to_intel_dp(&encoder->base); 3585 if (intel_dp->compliance.test_type == 3586 DP_TEST_LINK_EDID_READ) 3587 seq_printf(m, "%lx", 3588 intel_dp->compliance.test_data.edid); 3589 else if (intel_dp->compliance.test_type == 3590 DP_TEST_LINK_VIDEO_PATTERN) { 3591 seq_printf(m, "hdisplay: %d\n", 3592 intel_dp->compliance.test_data.hdisplay); 3593 seq_printf(m, "vdisplay: %d\n", 3594 intel_dp->compliance.test_data.vdisplay); 3595 seq_printf(m, "bpc: %u\n", 3596 intel_dp->compliance.test_data.bpc); 3597 } 3598 } else 3599 seq_puts(m, "0"); 3600 } 3601 drm_connector_list_iter_end(&conn_iter); 3602 3603 return 0; 3604 } 3605 static int i915_displayport_test_data_open(struct inode *inode, 3606 struct file *file) 3607 { 3608 struct drm_i915_private *dev_priv = inode->i_private; 3609 3610 return single_open(file, i915_displayport_test_data_show, 3611 &dev_priv->drm); 3612 } 3613 3614 static const struct file_operations i915_displayport_test_data_fops = { 3615 .owner = THIS_MODULE, 3616 .open = i915_displayport_test_data_open, 3617 .read = seq_read, 3618 .llseek = seq_lseek, 3619 .release = single_release 3620 }; 3621 3622 static int i915_displayport_test_type_show(struct seq_file *m, void *data) 3623 { 3624 struct drm_device *dev = m->private; 3625 struct drm_connector *connector; 3626 struct drm_connector_list_iter conn_iter; 3627 struct intel_dp *intel_dp; 3628 3629 drm_connector_list_iter_begin(dev, &conn_iter); 3630 drm_for_each_connector_iter(connector, &conn_iter) { 3631 struct intel_encoder *encoder; 3632 3633 if (connector->connector_type != 3634 DRM_MODE_CONNECTOR_DisplayPort) 3635 continue; 3636 3637 encoder = to_intel_encoder(connector->encoder); 3638 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST) 3639 continue; 3640 3641 if (encoder && connector->status == connector_status_connected) { 3642 intel_dp = enc_to_intel_dp(&encoder->base); 3643 seq_printf(m, "%02lx", intel_dp->compliance.test_type); 3644 } else 3645 seq_puts(m, "0"); 3646 } 3647 drm_connector_list_iter_end(&conn_iter); 3648 3649 return 0; 3650 } 3651 3652 static int i915_displayport_test_type_open(struct inode *inode, 3653 struct file *file) 3654 { 3655 struct drm_i915_private *dev_priv = inode->i_private; 3656 3657 return single_open(file, i915_displayport_test_type_show, 3658 &dev_priv->drm); 3659 } 3660 3661 static const struct file_operations i915_displayport_test_type_fops = { 3662 .owner = THIS_MODULE, 3663 .open = i915_displayport_test_type_open, 3664 .read = seq_read, 3665 .llseek = seq_lseek, 3666 .release = single_release 3667 }; 3668 3669 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8]) 3670 { 3671 struct drm_i915_private *dev_priv = m->private; 3672 struct drm_device *dev = &dev_priv->drm; 3673 int level; 3674 int num_levels; 3675 3676 if (IS_CHERRYVIEW(dev_priv)) 3677 num_levels = 3; 3678 else if (IS_VALLEYVIEW(dev_priv)) 3679 num_levels = 1; 3680 else if (IS_G4X(dev_priv)) 3681 num_levels = 3; 3682 else 3683 num_levels = ilk_wm_max_level(dev_priv) + 1; 3684 3685 drm_modeset_lock_all(dev); 3686 3687 for (level = 0; level < num_levels; level++) { 3688 unsigned int latency = wm[level]; 3689 3690 /* 3691 * - WM1+ latency values in 0.5us units 3692 * - latencies are in us on gen9/vlv/chv 3693 */ 3694 if (INTEL_GEN(dev_priv) >= 9 || 3695 IS_VALLEYVIEW(dev_priv) || 3696 IS_CHERRYVIEW(dev_priv) || 3697 IS_G4X(dev_priv)) 3698 latency *= 10; 3699 else if (level > 0) 3700 latency *= 5; 3701 3702 seq_printf(m, "WM%d %u (%u.%u usec)\n", 3703 level, wm[level], latency / 10, latency % 10); 3704 } 3705 3706 drm_modeset_unlock_all(dev); 3707 } 3708 3709 static int pri_wm_latency_show(struct seq_file *m, void *data) 3710 { 3711 struct drm_i915_private *dev_priv = m->private; 3712 const uint16_t *latencies; 3713 3714 if (INTEL_GEN(dev_priv) >= 9) 3715 latencies = dev_priv->wm.skl_latency; 3716 else 3717 latencies = dev_priv->wm.pri_latency; 3718 3719 wm_latency_show(m, latencies); 3720 3721 return 0; 3722 } 3723 3724 static int spr_wm_latency_show(struct seq_file *m, void *data) 3725 { 3726 struct drm_i915_private *dev_priv = m->private; 3727 const uint16_t *latencies; 3728 3729 if (INTEL_GEN(dev_priv) >= 9) 3730 latencies = dev_priv->wm.skl_latency; 3731 else 3732 latencies = dev_priv->wm.spr_latency; 3733 3734 wm_latency_show(m, latencies); 3735 3736 return 0; 3737 } 3738 3739 static int cur_wm_latency_show(struct seq_file *m, void *data) 3740 { 3741 struct drm_i915_private *dev_priv = m->private; 3742 const uint16_t *latencies; 3743 3744 if (INTEL_GEN(dev_priv) >= 9) 3745 latencies = dev_priv->wm.skl_latency; 3746 else 3747 latencies = dev_priv->wm.cur_latency; 3748 3749 wm_latency_show(m, latencies); 3750 3751 return 0; 3752 } 3753 3754 static int pri_wm_latency_open(struct inode *inode, struct file *file) 3755 { 3756 struct drm_i915_private *dev_priv = inode->i_private; 3757 3758 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 3759 return -ENODEV; 3760 3761 return single_open(file, pri_wm_latency_show, dev_priv); 3762 } 3763 3764 static int spr_wm_latency_open(struct inode *inode, struct file *file) 3765 { 3766 struct drm_i915_private *dev_priv = inode->i_private; 3767 3768 if (HAS_GMCH_DISPLAY(dev_priv)) 3769 return -ENODEV; 3770 3771 return single_open(file, spr_wm_latency_show, dev_priv); 3772 } 3773 3774 static int cur_wm_latency_open(struct inode *inode, struct file *file) 3775 { 3776 struct drm_i915_private *dev_priv = inode->i_private; 3777 3778 if (HAS_GMCH_DISPLAY(dev_priv)) 3779 return -ENODEV; 3780 3781 return single_open(file, cur_wm_latency_show, dev_priv); 3782 } 3783 3784 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 3785 size_t len, loff_t *offp, uint16_t wm[8]) 3786 { 3787 struct seq_file *m = file->private_data; 3788 struct drm_i915_private *dev_priv = m->private; 3789 struct drm_device *dev = &dev_priv->drm; 3790 uint16_t new[8] = { 0 }; 3791 int num_levels; 3792 int level; 3793 int ret; 3794 char tmp[32]; 3795 3796 if (IS_CHERRYVIEW(dev_priv)) 3797 num_levels = 3; 3798 else if (IS_VALLEYVIEW(dev_priv)) 3799 num_levels = 1; 3800 else if (IS_G4X(dev_priv)) 3801 num_levels = 3; 3802 else 3803 num_levels = ilk_wm_max_level(dev_priv) + 1; 3804 3805 if (len >= sizeof(tmp)) 3806 return -EINVAL; 3807 3808 if (copy_from_user(tmp, ubuf, len)) 3809 return -EFAULT; 3810 3811 tmp[len] = '\0'; 3812 3813 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", 3814 &new[0], &new[1], &new[2], &new[3], 3815 &new[4], &new[5], &new[6], &new[7]); 3816 if (ret != num_levels) 3817 return -EINVAL; 3818 3819 drm_modeset_lock_all(dev); 3820 3821 for (level = 0; level < num_levels; level++) 3822 wm[level] = new[level]; 3823 3824 drm_modeset_unlock_all(dev); 3825 3826 return len; 3827 } 3828 3829 3830 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, 3831 size_t len, loff_t *offp) 3832 { 3833 struct seq_file *m = file->private_data; 3834 struct drm_i915_private *dev_priv = m->private; 3835 uint16_t *latencies; 3836 3837 if (INTEL_GEN(dev_priv) >= 9) 3838 latencies = dev_priv->wm.skl_latency; 3839 else 3840 latencies = dev_priv->wm.pri_latency; 3841 3842 return wm_latency_write(file, ubuf, len, offp, latencies); 3843 } 3844 3845 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 3846 size_t len, loff_t *offp) 3847 { 3848 struct seq_file *m = file->private_data; 3849 struct drm_i915_private *dev_priv = m->private; 3850 uint16_t *latencies; 3851 3852 if (INTEL_GEN(dev_priv) >= 9) 3853 latencies = dev_priv->wm.skl_latency; 3854 else 3855 latencies = dev_priv->wm.spr_latency; 3856 3857 return wm_latency_write(file, ubuf, len, offp, latencies); 3858 } 3859 3860 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 3861 size_t len, loff_t *offp) 3862 { 3863 struct seq_file *m = file->private_data; 3864 struct drm_i915_private *dev_priv = m->private; 3865 uint16_t *latencies; 3866 3867 if (INTEL_GEN(dev_priv) >= 9) 3868 latencies = dev_priv->wm.skl_latency; 3869 else 3870 latencies = dev_priv->wm.cur_latency; 3871 3872 return wm_latency_write(file, ubuf, len, offp, latencies); 3873 } 3874 3875 static const struct file_operations i915_pri_wm_latency_fops = { 3876 .owner = THIS_MODULE, 3877 .open = pri_wm_latency_open, 3878 .read = seq_read, 3879 .llseek = seq_lseek, 3880 .release = single_release, 3881 .write = pri_wm_latency_write 3882 }; 3883 3884 static const struct file_operations i915_spr_wm_latency_fops = { 3885 .owner = THIS_MODULE, 3886 .open = spr_wm_latency_open, 3887 .read = seq_read, 3888 .llseek = seq_lseek, 3889 .release = single_release, 3890 .write = spr_wm_latency_write 3891 }; 3892 3893 static const struct file_operations i915_cur_wm_latency_fops = { 3894 .owner = THIS_MODULE, 3895 .open = cur_wm_latency_open, 3896 .read = seq_read, 3897 .llseek = seq_lseek, 3898 .release = single_release, 3899 .write = cur_wm_latency_write 3900 }; 3901 3902 static int 3903 i915_wedged_get(void *data, u64 *val) 3904 { 3905 struct drm_i915_private *dev_priv = data; 3906 3907 *val = i915_terminally_wedged(&dev_priv->gpu_error); 3908 3909 return 0; 3910 } 3911 3912 static int 3913 i915_wedged_set(void *data, u64 val) 3914 { 3915 struct drm_i915_private *i915 = data; 3916 struct intel_engine_cs *engine; 3917 unsigned int tmp; 3918 3919 /* 3920 * There is no safeguard against this debugfs entry colliding 3921 * with the hangcheck calling same i915_handle_error() in 3922 * parallel, causing an explosion. For now we assume that the 3923 * test harness is responsible enough not to inject gpu hangs 3924 * while it is writing to 'i915_wedged' 3925 */ 3926 3927 if (i915_reset_backoff(&i915->gpu_error)) 3928 return -EAGAIN; 3929 3930 for_each_engine_masked(engine, i915, val, tmp) { 3931 engine->hangcheck.seqno = intel_engine_get_seqno(engine); 3932 engine->hangcheck.stalled = true; 3933 } 3934 3935 i915_handle_error(i915, val, "Manually setting wedged to %llu", val); 3936 3937 wait_on_bit(&i915->gpu_error.flags, 3938 I915_RESET_HANDOFF, 3939 TASK_UNINTERRUPTIBLE); 3940 3941 return 0; 3942 } 3943 3944 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, 3945 i915_wedged_get, i915_wedged_set, 3946 "%llu\n"); 3947 3948 static int 3949 fault_irq_set(struct drm_i915_private *i915, 3950 unsigned long *irq, 3951 unsigned long val) 3952 { 3953 int err; 3954 3955 err = mutex_lock_interruptible(&i915->drm.struct_mutex); 3956 if (err) 3957 return err; 3958 3959 err = i915_gem_wait_for_idle(i915, 3960 I915_WAIT_LOCKED | 3961 I915_WAIT_INTERRUPTIBLE); 3962 if (err) 3963 goto err_unlock; 3964 3965 *irq = val; 3966 mutex_unlock(&i915->drm.struct_mutex); 3967 3968 /* Flush idle worker to disarm irq */ 3969 drain_delayed_work(&i915->gt.idle_work); 3970 3971 return 0; 3972 3973 err_unlock: 3974 mutex_unlock(&i915->drm.struct_mutex); 3975 return err; 3976 } 3977 3978 static int 3979 i915_ring_missed_irq_get(void *data, u64 *val) 3980 { 3981 struct drm_i915_private *dev_priv = data; 3982 3983 *val = dev_priv->gpu_error.missed_irq_rings; 3984 return 0; 3985 } 3986 3987 static int 3988 i915_ring_missed_irq_set(void *data, u64 val) 3989 { 3990 struct drm_i915_private *i915 = data; 3991 3992 return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val); 3993 } 3994 3995 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops, 3996 i915_ring_missed_irq_get, i915_ring_missed_irq_set, 3997 "0x%08llx\n"); 3998 3999 static int 4000 i915_ring_test_irq_get(void *data, u64 *val) 4001 { 4002 struct drm_i915_private *dev_priv = data; 4003 4004 *val = dev_priv->gpu_error.test_irq_rings; 4005 4006 return 0; 4007 } 4008 4009 static int 4010 i915_ring_test_irq_set(void *data, u64 val) 4011 { 4012 struct drm_i915_private *i915 = data; 4013 4014 val &= INTEL_INFO(i915)->ring_mask; 4015 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 4016 4017 return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val); 4018 } 4019 4020 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops, 4021 i915_ring_test_irq_get, i915_ring_test_irq_set, 4022 "0x%08llx\n"); 4023 4024 #define DROP_UNBOUND BIT(0) 4025 #define DROP_BOUND BIT(1) 4026 #define DROP_RETIRE BIT(2) 4027 #define DROP_ACTIVE BIT(3) 4028 #define DROP_FREED BIT(4) 4029 #define DROP_SHRINK_ALL BIT(5) 4030 #define DROP_IDLE BIT(6) 4031 #define DROP_ALL (DROP_UNBOUND | \ 4032 DROP_BOUND | \ 4033 DROP_RETIRE | \ 4034 DROP_ACTIVE | \ 4035 DROP_FREED | \ 4036 DROP_SHRINK_ALL |\ 4037 DROP_IDLE) 4038 static int 4039 i915_drop_caches_get(void *data, u64 *val) 4040 { 4041 *val = DROP_ALL; 4042 4043 return 0; 4044 } 4045 4046 static int 4047 i915_drop_caches_set(void *data, u64 val) 4048 { 4049 struct drm_i915_private *dev_priv = data; 4050 struct drm_device *dev = &dev_priv->drm; 4051 int ret = 0; 4052 4053 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", 4054 val, val & DROP_ALL); 4055 4056 /* No need to check and wait for gpu resets, only libdrm auto-restarts 4057 * on ioctls on -EAGAIN. */ 4058 if (val & (DROP_ACTIVE | DROP_RETIRE)) { 4059 ret = mutex_lock_interruptible(&dev->struct_mutex); 4060 if (ret) 4061 return ret; 4062 4063 if (val & DROP_ACTIVE) 4064 ret = i915_gem_wait_for_idle(dev_priv, 4065 I915_WAIT_INTERRUPTIBLE | 4066 I915_WAIT_LOCKED); 4067 4068 if (val & DROP_RETIRE) 4069 i915_gem_retire_requests(dev_priv); 4070 4071 mutex_unlock(&dev->struct_mutex); 4072 } 4073 4074 fs_reclaim_acquire(GFP_KERNEL); 4075 if (val & DROP_BOUND) 4076 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_BOUND); 4077 4078 if (val & DROP_UNBOUND) 4079 i915_gem_shrink(dev_priv, LONG_MAX, NULL, I915_SHRINK_UNBOUND); 4080 4081 if (val & DROP_SHRINK_ALL) 4082 i915_gem_shrink_all(dev_priv); 4083 fs_reclaim_release(GFP_KERNEL); 4084 4085 if (val & DROP_IDLE) 4086 drain_delayed_work(&dev_priv->gt.idle_work); 4087 4088 if (val & DROP_FREED) { 4089 synchronize_rcu(); 4090 i915_gem_drain_freed_objects(dev_priv); 4091 } 4092 4093 return ret; 4094 } 4095 4096 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, 4097 i915_drop_caches_get, i915_drop_caches_set, 4098 "0x%08llx\n"); 4099 4100 static int 4101 i915_max_freq_get(void *data, u64 *val) 4102 { 4103 struct drm_i915_private *dev_priv = data; 4104 4105 if (INTEL_GEN(dev_priv) < 6) 4106 return -ENODEV; 4107 4108 *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.max_freq_softlimit); 4109 return 0; 4110 } 4111 4112 static int 4113 i915_max_freq_set(void *data, u64 val) 4114 { 4115 struct drm_i915_private *dev_priv = data; 4116 struct intel_rps *rps = &dev_priv->gt_pm.rps; 4117 u32 hw_max, hw_min; 4118 int ret; 4119 4120 if (INTEL_GEN(dev_priv) < 6) 4121 return -ENODEV; 4122 4123 DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); 4124 4125 ret = mutex_lock_interruptible(&dev_priv->pcu_lock); 4126 if (ret) 4127 return ret; 4128 4129 /* 4130 * Turbo will still be enabled, but won't go above the set value. 4131 */ 4132 val = intel_freq_opcode(dev_priv, val); 4133 4134 hw_max = rps->max_freq; 4135 hw_min = rps->min_freq; 4136 4137 if (val < hw_min || val > hw_max || val < rps->min_freq_softlimit) { 4138 mutex_unlock(&dev_priv->pcu_lock); 4139 return -EINVAL; 4140 } 4141 4142 rps->max_freq_softlimit = val; 4143 4144 if (intel_set_rps(dev_priv, val)) 4145 DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); 4146 4147 mutex_unlock(&dev_priv->pcu_lock); 4148 4149 return 0; 4150 } 4151 4152 DEFINE_SIMPLE_ATTRIBUTE(i915_max_freq_fops, 4153 i915_max_freq_get, i915_max_freq_set, 4154 "%llu\n"); 4155 4156 static int 4157 i915_min_freq_get(void *data, u64 *val) 4158 { 4159 struct drm_i915_private *dev_priv = data; 4160 4161 if (INTEL_GEN(dev_priv) < 6) 4162 return -ENODEV; 4163 4164 *val = intel_gpu_freq(dev_priv, dev_priv->gt_pm.rps.min_freq_softlimit); 4165 return 0; 4166 } 4167 4168 static int 4169 i915_min_freq_set(void *data, u64 val) 4170 { 4171 struct drm_i915_private *dev_priv = data; 4172 struct intel_rps *rps = &dev_priv->gt_pm.rps; 4173 u32 hw_max, hw_min; 4174 int ret; 4175 4176 if (INTEL_GEN(dev_priv) < 6) 4177 return -ENODEV; 4178 4179 DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); 4180 4181 ret = mutex_lock_interruptible(&dev_priv->pcu_lock); 4182 if (ret) 4183 return ret; 4184 4185 /* 4186 * Turbo will still be enabled, but won't go below the set value. 4187 */ 4188 val = intel_freq_opcode(dev_priv, val); 4189 4190 hw_max = rps->max_freq; 4191 hw_min = rps->min_freq; 4192 4193 if (val < hw_min || 4194 val > hw_max || val > rps->max_freq_softlimit) { 4195 mutex_unlock(&dev_priv->pcu_lock); 4196 return -EINVAL; 4197 } 4198 4199 rps->min_freq_softlimit = val; 4200 4201 if (intel_set_rps(dev_priv, val)) 4202 DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); 4203 4204 mutex_unlock(&dev_priv->pcu_lock); 4205 4206 return 0; 4207 } 4208 4209 DEFINE_SIMPLE_ATTRIBUTE(i915_min_freq_fops, 4210 i915_min_freq_get, i915_min_freq_set, 4211 "%llu\n"); 4212 4213 static int 4214 i915_cache_sharing_get(void *data, u64 *val) 4215 { 4216 struct drm_i915_private *dev_priv = data; 4217 u32 snpcr; 4218 4219 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv))) 4220 return -ENODEV; 4221 4222 intel_runtime_pm_get(dev_priv); 4223 4224 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4225 4226 intel_runtime_pm_put(dev_priv); 4227 4228 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 4229 4230 return 0; 4231 } 4232 4233 static int 4234 i915_cache_sharing_set(void *data, u64 val) 4235 { 4236 struct drm_i915_private *dev_priv = data; 4237 u32 snpcr; 4238 4239 if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv))) 4240 return -ENODEV; 4241 4242 if (val > 3) 4243 return -EINVAL; 4244 4245 intel_runtime_pm_get(dev_priv); 4246 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val); 4247 4248 /* Update the cache sharing policy here as well */ 4249 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 4250 snpcr &= ~GEN6_MBC_SNPCR_MASK; 4251 snpcr |= (val << GEN6_MBC_SNPCR_SHIFT); 4252 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 4253 4254 intel_runtime_pm_put(dev_priv); 4255 return 0; 4256 } 4257 4258 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, 4259 i915_cache_sharing_get, i915_cache_sharing_set, 4260 "%llu\n"); 4261 4262 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv, 4263 struct sseu_dev_info *sseu) 4264 { 4265 int ss_max = 2; 4266 int ss; 4267 u32 sig1[ss_max], sig2[ss_max]; 4268 4269 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); 4270 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); 4271 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); 4272 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); 4273 4274 for (ss = 0; ss < ss_max; ss++) { 4275 unsigned int eu_cnt; 4276 4277 if (sig1[ss] & CHV_SS_PG_ENABLE) 4278 /* skip disabled subslice */ 4279 continue; 4280 4281 sseu->slice_mask = BIT(0); 4282 sseu->subslice_mask |= BIT(ss); 4283 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + 4284 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + 4285 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + 4286 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); 4287 sseu->eu_total += eu_cnt; 4288 sseu->eu_per_subslice = max_t(unsigned int, 4289 sseu->eu_per_subslice, eu_cnt); 4290 } 4291 } 4292 4293 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, 4294 struct sseu_dev_info *sseu) 4295 { 4296 const struct intel_device_info *info = INTEL_INFO(dev_priv); 4297 int s_max = 6, ss_max = 4; 4298 int s, ss; 4299 u32 s_reg[s_max], eu_reg[2 * s_max], eu_mask[2]; 4300 4301 for (s = 0; s < s_max; s++) { 4302 /* 4303 * FIXME: Valid SS Mask respects the spec and read 4304 * only valid bits for those registers, excluding reserverd 4305 * although this seems wrong because it would leave many 4306 * subslices without ACK. 4307 */ 4308 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) & 4309 GEN10_PGCTL_VALID_SS_MASK(s); 4310 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s)); 4311 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s)); 4312 } 4313 4314 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | 4315 GEN9_PGCTL_SSA_EU19_ACK | 4316 GEN9_PGCTL_SSA_EU210_ACK | 4317 GEN9_PGCTL_SSA_EU311_ACK; 4318 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | 4319 GEN9_PGCTL_SSB_EU19_ACK | 4320 GEN9_PGCTL_SSB_EU210_ACK | 4321 GEN9_PGCTL_SSB_EU311_ACK; 4322 4323 for (s = 0; s < s_max; s++) { 4324 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 4325 /* skip disabled slice */ 4326 continue; 4327 4328 sseu->slice_mask |= BIT(s); 4329 sseu->subslice_mask = info->sseu.subslice_mask; 4330 4331 for (ss = 0; ss < ss_max; ss++) { 4332 unsigned int eu_cnt; 4333 4334 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) 4335 /* skip disabled subslice */ 4336 continue; 4337 4338 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] & 4339 eu_mask[ss % 2]); 4340 sseu->eu_total += eu_cnt; 4341 sseu->eu_per_subslice = max_t(unsigned int, 4342 sseu->eu_per_subslice, 4343 eu_cnt); 4344 } 4345 } 4346 } 4347 4348 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, 4349 struct sseu_dev_info *sseu) 4350 { 4351 int s_max = 3, ss_max = 4; 4352 int s, ss; 4353 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; 4354 4355 /* BXT has a single slice and at most 3 subslices. */ 4356 if (IS_GEN9_LP(dev_priv)) { 4357 s_max = 1; 4358 ss_max = 3; 4359 } 4360 4361 for (s = 0; s < s_max; s++) { 4362 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); 4363 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s)); 4364 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s)); 4365 } 4366 4367 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | 4368 GEN9_PGCTL_SSA_EU19_ACK | 4369 GEN9_PGCTL_SSA_EU210_ACK | 4370 GEN9_PGCTL_SSA_EU311_ACK; 4371 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | 4372 GEN9_PGCTL_SSB_EU19_ACK | 4373 GEN9_PGCTL_SSB_EU210_ACK | 4374 GEN9_PGCTL_SSB_EU311_ACK; 4375 4376 for (s = 0; s < s_max; s++) { 4377 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) 4378 /* skip disabled slice */ 4379 continue; 4380 4381 sseu->slice_mask |= BIT(s); 4382 4383 if (IS_GEN9_BC(dev_priv)) 4384 sseu->subslice_mask = 4385 INTEL_INFO(dev_priv)->sseu.subslice_mask; 4386 4387 for (ss = 0; ss < ss_max; ss++) { 4388 unsigned int eu_cnt; 4389 4390 if (IS_GEN9_LP(dev_priv)) { 4391 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) 4392 /* skip disabled subslice */ 4393 continue; 4394 4395 sseu->subslice_mask |= BIT(ss); 4396 } 4397 4398 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & 4399 eu_mask[ss%2]); 4400 sseu->eu_total += eu_cnt; 4401 sseu->eu_per_subslice = max_t(unsigned int, 4402 sseu->eu_per_subslice, 4403 eu_cnt); 4404 } 4405 } 4406 } 4407 4408 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, 4409 struct sseu_dev_info *sseu) 4410 { 4411 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); 4412 int s; 4413 4414 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; 4415 4416 if (sseu->slice_mask) { 4417 sseu->subslice_mask = INTEL_INFO(dev_priv)->sseu.subslice_mask; 4418 sseu->eu_per_subslice = 4419 INTEL_INFO(dev_priv)->sseu.eu_per_subslice; 4420 sseu->eu_total = sseu->eu_per_subslice * 4421 sseu_subslice_total(sseu); 4422 4423 /* subtract fused off EU(s) from enabled slice(s) */ 4424 for (s = 0; s < fls(sseu->slice_mask); s++) { 4425 u8 subslice_7eu = 4426 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s]; 4427 4428 sseu->eu_total -= hweight8(subslice_7eu); 4429 } 4430 } 4431 } 4432 4433 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, 4434 const struct sseu_dev_info *sseu) 4435 { 4436 struct drm_i915_private *dev_priv = node_to_i915(m->private); 4437 const char *type = is_available_info ? "Available" : "Enabled"; 4438 4439 seq_printf(m, " %s Slice Mask: %04x\n", type, 4440 sseu->slice_mask); 4441 seq_printf(m, " %s Slice Total: %u\n", type, 4442 hweight8(sseu->slice_mask)); 4443 seq_printf(m, " %s Subslice Total: %u\n", type, 4444 sseu_subslice_total(sseu)); 4445 seq_printf(m, " %s Subslice Mask: %04x\n", type, 4446 sseu->subslice_mask); 4447 seq_printf(m, " %s Subslice Per Slice: %u\n", type, 4448 hweight8(sseu->subslice_mask)); 4449 seq_printf(m, " %s EU Total: %u\n", type, 4450 sseu->eu_total); 4451 seq_printf(m, " %s EU Per Subslice: %u\n", type, 4452 sseu->eu_per_subslice); 4453 4454 if (!is_available_info) 4455 return; 4456 4457 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv))); 4458 if (HAS_POOLED_EU(dev_priv)) 4459 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool); 4460 4461 seq_printf(m, " Has Slice Power Gating: %s\n", 4462 yesno(sseu->has_slice_pg)); 4463 seq_printf(m, " Has Subslice Power Gating: %s\n", 4464 yesno(sseu->has_subslice_pg)); 4465 seq_printf(m, " Has EU Power Gating: %s\n", 4466 yesno(sseu->has_eu_pg)); 4467 } 4468 4469 static int i915_sseu_status(struct seq_file *m, void *unused) 4470 { 4471 struct drm_i915_private *dev_priv = node_to_i915(m->private); 4472 struct sseu_dev_info sseu; 4473 4474 if (INTEL_GEN(dev_priv) < 8) 4475 return -ENODEV; 4476 4477 seq_puts(m, "SSEU Device Info\n"); 4478 i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu); 4479 4480 seq_puts(m, "SSEU Device Status\n"); 4481 memset(&sseu, 0, sizeof(sseu)); 4482 4483 intel_runtime_pm_get(dev_priv); 4484 4485 if (IS_CHERRYVIEW(dev_priv)) { 4486 cherryview_sseu_device_status(dev_priv, &sseu); 4487 } else if (IS_BROADWELL(dev_priv)) { 4488 broadwell_sseu_device_status(dev_priv, &sseu); 4489 } else if (IS_GEN9(dev_priv)) { 4490 gen9_sseu_device_status(dev_priv, &sseu); 4491 } else if (INTEL_GEN(dev_priv) >= 10) { 4492 gen10_sseu_device_status(dev_priv, &sseu); 4493 } 4494 4495 intel_runtime_pm_put(dev_priv); 4496 4497 i915_print_sseu_info(m, false, &sseu); 4498 4499 return 0; 4500 } 4501 4502 static int i915_forcewake_open(struct inode *inode, struct file *file) 4503 { 4504 struct drm_i915_private *i915 = inode->i_private; 4505 4506 if (INTEL_GEN(i915) < 6) 4507 return 0; 4508 4509 intel_runtime_pm_get(i915); 4510 intel_uncore_forcewake_user_get(i915); 4511 4512 return 0; 4513 } 4514 4515 static int i915_forcewake_release(struct inode *inode, struct file *file) 4516 { 4517 struct drm_i915_private *i915 = inode->i_private; 4518 4519 if (INTEL_GEN(i915) < 6) 4520 return 0; 4521 4522 intel_uncore_forcewake_user_put(i915); 4523 intel_runtime_pm_put(i915); 4524 4525 return 0; 4526 } 4527 4528 static const struct file_operations i915_forcewake_fops = { 4529 .owner = THIS_MODULE, 4530 .open = i915_forcewake_open, 4531 .release = i915_forcewake_release, 4532 }; 4533 4534 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) 4535 { 4536 struct drm_i915_private *dev_priv = m->private; 4537 struct i915_hotplug *hotplug = &dev_priv->hotplug; 4538 4539 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); 4540 seq_printf(m, "Detected: %s\n", 4541 yesno(delayed_work_pending(&hotplug->reenable_work))); 4542 4543 return 0; 4544 } 4545 4546 static ssize_t i915_hpd_storm_ctl_write(struct file *file, 4547 const char __user *ubuf, size_t len, 4548 loff_t *offp) 4549 { 4550 struct seq_file *m = file->private_data; 4551 struct drm_i915_private *dev_priv = m->private; 4552 struct i915_hotplug *hotplug = &dev_priv->hotplug; 4553 unsigned int new_threshold; 4554 int i; 4555 char *newline; 4556 char tmp[16]; 4557 4558 if (len >= sizeof(tmp)) 4559 return -EINVAL; 4560 4561 if (copy_from_user(tmp, ubuf, len)) 4562 return -EFAULT; 4563 4564 tmp[len] = '\0'; 4565 4566 /* Strip newline, if any */ 4567 newline = strchr(tmp, '\n'); 4568 if (newline) 4569 *newline = '\0'; 4570 4571 if (strcmp(tmp, "reset") == 0) 4572 new_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4573 else if (kstrtouint(tmp, 10, &new_threshold) != 0) 4574 return -EINVAL; 4575 4576 if (new_threshold > 0) 4577 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n", 4578 new_threshold); 4579 else 4580 DRM_DEBUG_KMS("Disabling HPD storm detection\n"); 4581 4582 spin_lock_irq(&dev_priv->irq_lock); 4583 hotplug->hpd_storm_threshold = new_threshold; 4584 /* Reset the HPD storm stats so we don't accidentally trigger a storm */ 4585 for_each_hpd_pin(i) 4586 hotplug->stats[i].count = 0; 4587 spin_unlock_irq(&dev_priv->irq_lock); 4588 4589 /* Re-enable hpd immediately if we were in an irq storm */ 4590 flush_delayed_work(&dev_priv->hotplug.reenable_work); 4591 4592 return len; 4593 } 4594 4595 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file) 4596 { 4597 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private); 4598 } 4599 4600 static const struct file_operations i915_hpd_storm_ctl_fops = { 4601 .owner = THIS_MODULE, 4602 .open = i915_hpd_storm_ctl_open, 4603 .read = seq_read, 4604 .llseek = seq_lseek, 4605 .release = single_release, 4606 .write = i915_hpd_storm_ctl_write 4607 }; 4608 4609 static const struct drm_info_list i915_debugfs_list[] = { 4610 {"i915_capabilities", i915_capabilities, 0}, 4611 {"i915_gem_objects", i915_gem_object_info, 0}, 4612 {"i915_gem_gtt", i915_gem_gtt_info, 0}, 4613 {"i915_gem_stolen", i915_gem_stolen_list_info }, 4614 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 4615 {"i915_gem_interrupt", i915_interrupt_info, 0}, 4616 {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0}, 4617 {"i915_guc_info", i915_guc_info, 0}, 4618 {"i915_guc_load_status", i915_guc_load_status_info, 0}, 4619 {"i915_guc_log_dump", i915_guc_log_dump, 0}, 4620 {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1}, 4621 {"i915_guc_stage_pool", i915_guc_stage_pool, 0}, 4622 {"i915_huc_load_status", i915_huc_load_status_info, 0}, 4623 {"i915_frequency_info", i915_frequency_info, 0}, 4624 {"i915_hangcheck_info", i915_hangcheck_info, 0}, 4625 {"i915_reset_info", i915_reset_info, 0}, 4626 {"i915_drpc_info", i915_drpc_info, 0}, 4627 {"i915_emon_status", i915_emon_status, 0}, 4628 {"i915_ring_freq_table", i915_ring_freq_table, 0}, 4629 {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, 4630 {"i915_fbc_status", i915_fbc_status, 0}, 4631 {"i915_ips_status", i915_ips_status, 0}, 4632 {"i915_sr_status", i915_sr_status, 0}, 4633 {"i915_opregion", i915_opregion, 0}, 4634 {"i915_vbt", i915_vbt, 0}, 4635 {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0}, 4636 {"i915_context_status", i915_context_status, 0}, 4637 {"i915_forcewake_domains", i915_forcewake_domains, 0}, 4638 {"i915_swizzle_info", i915_swizzle_info, 0}, 4639 {"i915_ppgtt_info", i915_ppgtt_info, 0}, 4640 {"i915_llc", i915_llc, 0}, 4641 {"i915_edp_psr_status", i915_edp_psr_status, 0}, 4642 {"i915_sink_crc_eDP1", i915_sink_crc, 0}, 4643 {"i915_energy_uJ", i915_energy_uJ, 0}, 4644 {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, 4645 {"i915_power_domain_info", i915_power_domain_info, 0}, 4646 {"i915_dmc_info", i915_dmc_info, 0}, 4647 {"i915_display_info", i915_display_info, 0}, 4648 {"i915_engine_info", i915_engine_info, 0}, 4649 {"i915_shrinker_info", i915_shrinker_info, 0}, 4650 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 4651 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4652 {"i915_wa_registers", i915_wa_registers, 0}, 4653 {"i915_ddb_info", i915_ddb_info, 0}, 4654 {"i915_sseu_status", i915_sseu_status, 0}, 4655 {"i915_drrs_status", i915_drrs_status, 0}, 4656 {"i915_rps_boost_info", i915_rps_boost_info, 0}, 4657 }; 4658 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4659 4660 static const struct i915_debugfs_files { 4661 const char *name; 4662 const struct file_operations *fops; 4663 } i915_debugfs_files[] = { 4664 {"i915_wedged", &i915_wedged_fops}, 4665 {"i915_max_freq", &i915_max_freq_fops}, 4666 {"i915_min_freq", &i915_min_freq_fops}, 4667 {"i915_cache_sharing", &i915_cache_sharing_fops}, 4668 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 4669 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 4670 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 4671 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 4672 {"i915_error_state", &i915_error_state_fops}, 4673 {"i915_gpu_info", &i915_gpu_info_fops}, 4674 #endif 4675 {"i915_next_seqno", &i915_next_seqno_fops}, 4676 {"i915_display_crc_ctl", &i915_display_crc_ctl_fops}, 4677 {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, 4678 {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, 4679 {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, 4680 {"i915_fbc_false_color", &i915_fbc_false_color_fops}, 4681 {"i915_dp_test_data", &i915_displayport_test_data_fops}, 4682 {"i915_dp_test_type", &i915_displayport_test_type_fops}, 4683 {"i915_dp_test_active", &i915_displayport_test_active_fops}, 4684 {"i915_guc_log_control", &i915_guc_log_control_fops}, 4685 {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, 4686 {"i915_ipc_status", &i915_ipc_status_fops} 4687 }; 4688 4689 int i915_debugfs_register(struct drm_i915_private *dev_priv) 4690 { 4691 struct drm_minor *minor = dev_priv->drm.primary; 4692 struct dentry *ent; 4693 int ret, i; 4694 4695 ent = debugfs_create_file("i915_forcewake_user", S_IRUSR, 4696 minor->debugfs_root, to_i915(minor->dev), 4697 &i915_forcewake_fops); 4698 if (!ent) 4699 return -ENOMEM; 4700 4701 ret = intel_pipe_crc_create(minor); 4702 if (ret) 4703 return ret; 4704 4705 for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { 4706 ent = debugfs_create_file(i915_debugfs_files[i].name, 4707 S_IRUGO | S_IWUSR, 4708 minor->debugfs_root, 4709 to_i915(minor->dev), 4710 i915_debugfs_files[i].fops); 4711 if (!ent) 4712 return -ENOMEM; 4713 } 4714 4715 return drm_debugfs_create_files(i915_debugfs_list, 4716 I915_DEBUGFS_ENTRIES, 4717 minor->debugfs_root, minor); 4718 } 4719 4720 struct dpcd_block { 4721 /* DPCD dump start address. */ 4722 unsigned int offset; 4723 /* DPCD dump end address, inclusive. If unset, .size will be used. */ 4724 unsigned int end; 4725 /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ 4726 size_t size; 4727 /* Only valid for eDP. */ 4728 bool edp; 4729 }; 4730 4731 static const struct dpcd_block i915_dpcd_debug[] = { 4732 { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, 4733 { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, 4734 { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, 4735 { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, 4736 { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, 4737 { .offset = DP_SET_POWER }, 4738 { .offset = DP_EDP_DPCD_REV }, 4739 { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, 4740 { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, 4741 { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, 4742 }; 4743 4744 static int i915_dpcd_show(struct seq_file *m, void *data) 4745 { 4746 struct drm_connector *connector = m->private; 4747 struct intel_dp *intel_dp = 4748 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 4749 uint8_t buf[16]; 4750 ssize_t err; 4751 int i; 4752 4753 if (connector->status != connector_status_connected) 4754 return -ENODEV; 4755 4756 for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) { 4757 const struct dpcd_block *b = &i915_dpcd_debug[i]; 4758 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); 4759 4760 if (b->edp && 4761 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 4762 continue; 4763 4764 /* low tech for now */ 4765 if (WARN_ON(size > sizeof(buf))) 4766 continue; 4767 4768 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); 4769 if (err <= 0) { 4770 DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", 4771 size, b->offset, err); 4772 continue; 4773 } 4774 4775 seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); 4776 } 4777 4778 return 0; 4779 } 4780 4781 static int i915_dpcd_open(struct inode *inode, struct file *file) 4782 { 4783 return single_open(file, i915_dpcd_show, inode->i_private); 4784 } 4785 4786 static const struct file_operations i915_dpcd_fops = { 4787 .owner = THIS_MODULE, 4788 .open = i915_dpcd_open, 4789 .read = seq_read, 4790 .llseek = seq_lseek, 4791 .release = single_release, 4792 }; 4793 4794 static int i915_panel_show(struct seq_file *m, void *data) 4795 { 4796 struct drm_connector *connector = m->private; 4797 struct intel_dp *intel_dp = 4798 enc_to_intel_dp(&intel_attached_encoder(connector)->base); 4799 4800 if (connector->status != connector_status_connected) 4801 return -ENODEV; 4802 4803 seq_printf(m, "Panel power up delay: %d\n", 4804 intel_dp->panel_power_up_delay); 4805 seq_printf(m, "Panel power down delay: %d\n", 4806 intel_dp->panel_power_down_delay); 4807 seq_printf(m, "Backlight on delay: %d\n", 4808 intel_dp->backlight_on_delay); 4809 seq_printf(m, "Backlight off delay: %d\n", 4810 intel_dp->backlight_off_delay); 4811 4812 return 0; 4813 } 4814 4815 static int i915_panel_open(struct inode *inode, struct file *file) 4816 { 4817 return single_open(file, i915_panel_show, inode->i_private); 4818 } 4819 4820 static const struct file_operations i915_panel_fops = { 4821 .owner = THIS_MODULE, 4822 .open = i915_panel_open, 4823 .read = seq_read, 4824 .llseek = seq_lseek, 4825 .release = single_release, 4826 }; 4827 4828 /** 4829 * i915_debugfs_connector_add - add i915 specific connector debugfs files 4830 * @connector: pointer to a registered drm_connector 4831 * 4832 * Cleanup will be done by drm_connector_unregister() through a call to 4833 * drm_debugfs_connector_remove(). 4834 * 4835 * Returns 0 on success, negative error codes on error. 4836 */ 4837 int i915_debugfs_connector_add(struct drm_connector *connector) 4838 { 4839 struct dentry *root = connector->debugfs_entry; 4840 4841 /* The connector must have been registered beforehands. */ 4842 if (!root) 4843 return -ENODEV; 4844 4845 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || 4846 connector->connector_type == DRM_MODE_CONNECTOR_eDP) 4847 debugfs_create_file("i915_dpcd", S_IRUGO, root, 4848 connector, &i915_dpcd_fops); 4849 4850 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 4851 debugfs_create_file("i915_panel_timings", S_IRUGO, root, 4852 connector, &i915_panel_fops); 4853 4854 return 0; 4855 } 4856